1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy ethernet header defined below (DA at 0,
 * EtherType at 12, VLAN TCI at 14) and the maximum 12-bit VLAN ID.
 */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
29 #define DUMMY_ETH_HDR_LEN 16
/* NOTE(review): the initializer below is truncated in this listing — only
 * the first 6 of DUMMY_ETH_HDR_LEN bytes are visible; restore the remaining
 * bytes (SA and EtherType/VLAN placeholder) from the pristine source.
 */
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Admin-queue switch-rule buffer sizing helpers. Each computes the size of
 * an ice_aqc_sw_rules_elem with the generic 'pdata' member replaced by the
 * rule-specific payload: a lookup rx/tx rule with/without the dummy ethernet
 * header, a large-action chain of n actions, or a VSI list of n entries.
 * (No comments are interleaved below — the macros use '\' continuations.)
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Protocol-to-offset table and template for a MAC/IPv4/NVGRE/MAC/IPv4/TCP
 * dummy packet used to program GRE+TCP switch rules.
 * NOTE(review): fragment — interior lines are missing from this listing
 * (original numbering jumps); restore from the pristine source before edits.
 */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
/* Protocol-to-offset table and template for a MAC/IPv4/NVGRE/MAC/IPv4/UDP
 * dummy packet used to program GRE+UDP switch rules.
 * NOTE(review): fragment — interior lines are missing from this listing.
 */
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00,
/* Protocol-to-offset table and template for a MAC/IPv4/UDP/VXLAN/MAC/IPv4/TCP
 * dummy packet used to program VXLAN-tunneled TCP switch rules.
 * NOTE(review): fragment — interior lines are missing from this listing.
 */
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
162 { ICE_PROTOCOL_LAST, 0 },
166 u8 dummy_udp_tun_tcp_packet[] = {
167 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
168 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00,
171 0x08, 0x00, /* ICE_ETYPE_OL 12 */
173 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
174 0x00, 0x01, 0x00, 0x00,
175 0x40, 0x11, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
180 0x00, 0x46, 0x00, 0x00,
182 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
186 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00,
190 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
191 0x00, 0x01, 0x00, 0x00,
192 0x40, 0x06, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
197 0x00, 0x00, 0x00, 0x00,
198 0x00, 0x00, 0x00, 0x00,
199 0x50, 0x02, 0x20, 0x00,
200 0x00, 0x00, 0x00, 0x00
/* Protocol-to-offset table and template for a MAC/IPv4/UDP/VXLAN/MAC/IPv4/UDP
 * dummy packet used to program VXLAN-tunneled UDP switch rules.
 * NOTE(review): fragment — interior lines are missing from this listing.
 */
204 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
206 { ICE_ETYPE_OL, 12 },
207 { ICE_IPV4_OFOS, 14 },
213 { ICE_UDP_ILOS, 84 },
214 { ICE_PROTOCOL_LAST, 0 },
218 u8 dummy_udp_tun_udp_packet[] = {
219 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x08, 0x00, /* ICE_ETYPE_OL 12 */
225 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
226 0x00, 0x01, 0x00, 0x00,
227 0x00, 0x11, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
232 0x00, 0x3a, 0x00, 0x00,
234 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
235 0x00, 0x00, 0x00, 0x00,
237 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
238 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
242 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
243 0x00, 0x01, 0x00, 0x00,
244 0x00, 0x11, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00,
246 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
249 0x00, 0x08, 0x00, 0x00,
/* Protocol-to-offset table and template for a plain MAC/IPv4/UDP dummy
 * packet (padded to a 4-byte multiple).
 * NOTE(review): fragment — interior lines are missing from this listing.
 */
253 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
255 { ICE_ETYPE_OL, 12 },
256 { ICE_IPV4_OFOS, 14 },
257 { ICE_UDP_ILOS, 34 },
258 { ICE_PROTOCOL_LAST, 0 },
262 dummy_udp_packet[] = {
263 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
264 0x00, 0x00, 0x00, 0x00,
265 0x00, 0x00, 0x00, 0x00,
267 0x08, 0x00, /* ICE_ETYPE_OL 12 */
269 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
270 0x00, 0x01, 0x00, 0x00,
271 0x00, 0x11, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
276 0x00, 0x08, 0x00, 0x00,
278 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Protocol-to-offset table and template for a plain MAC/IPv4/TCP dummy
 * packet (padded to a 4-byte multiple).
 * NOTE(review): fragment — interior lines are missing from this listing.
 */
282 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
284 { ICE_ETYPE_OL, 12 },
285 { ICE_IPV4_OFOS, 14 },
287 { ICE_PROTOCOL_LAST, 0 },
291 dummy_tcp_packet[] = {
292 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
293 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00,
296 0x08, 0x00, /* ICE_ETYPE_OL 12 */
298 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
299 0x00, 0x01, 0x00, 0x00,
300 0x00, 0x06, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
305 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00,
307 0x50, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Protocol-to-offset table and template for a MAC/IPv6/TCP dummy packet.
 * NOTE(review): fragment — interior lines are missing from this listing.
 * The inline "ICE_IPV6_OFOS 40" comment below disagrees with the offsets
 * table (14); looks like a stale copy-paste — verify against pristine source.
 */
314 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
316 { ICE_ETYPE_OL, 12 },
317 { ICE_IPV6_OFOS, 14 },
319 { ICE_PROTOCOL_LAST, 0 },
323 dummy_tcp_ipv6_packet[] = {
324 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
325 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00,
328 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
330 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
331 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
342 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00,
344 0x50, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Protocol-to-offset table and template for a MAC/IPv6/UDP dummy packet.
 * NOTE(review): fragment — interior lines are missing from this listing.
 * As above, the inline "ICE_IPV6_OFOS 40" comment disagrees with the offsets
 * table (14) — verify against pristine source.
 */
351 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
353 { ICE_ETYPE_OL, 12 },
354 { ICE_IPV6_OFOS, 14 },
355 { ICE_UDP_ILOS, 54 },
356 { ICE_PROTOCOL_LAST, 0 },
360 dummy_udp_ipv6_packet[] = {
361 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
367 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
368 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
379 0x00, 0x08, 0x00, 0x00,
381 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Protocol-to-offset table and template for a MAC/IPv4/UDP/GTP-U dummy
 * packet (with a PDU session container extension header).
 * NOTE(review): fragment — interior lines are missing from this listing.
 */
385 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
387 { ICE_IPV4_OFOS, 14 },
390 { ICE_PROTOCOL_LAST, 0 },
394 dummy_udp_gtp_packet[] = {
395 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
400 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x11, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
407 0x00, 0x1c, 0x00, 0x00,
409 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
410 0x00, 0x00, 0x00, 0x00,
411 0x00, 0x00, 0x00, 0x85,
413 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
414 0x00, 0x00, 0x00, 0x00,
/* Protocol-to-offset table and template for a MAC/PPPoE-session dummy
 * packet carrying an IPv4 PDU.
 * NOTE(review): fragment — interior lines are missing from this listing.
 */
418 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
421 { ICE_PROTOCOL_LAST, 0 },
425 dummy_pppoe_packet[] = {
426 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
427 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
431 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 14 */
432 0x00, 0x4e, 0x00, 0x21,
434 0x45, 0x00, 0x00, 0x30, /* PDU */
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x11, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
/* Cached recipe<->profile association maps, filled from firmware by
 * ice_get_recp_to_prof_map() (forward-declared below) and kept in sync
 * with each other.
 */
441 /* this is a recipe to profile association bitmap */
442 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
443 ICE_MAX_NUM_PROFILES);
445 /* this is a profile to recipe association bitmap */
446 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
447 ICE_MAX_NUM_RECIPES);
449 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
452 * ice_collect_result_idx - copy result index values
453 * @buf: buffer that contains the result index
454 * @recp: the recipe struct to copy data into
456 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
457 struct ice_sw_recipe *recp)
459 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
460 ice_set_bit(buf->content.result_indx &
461 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
465 * ice_init_possible_res_bm - initialize possible result bitmap
466 * @pos_result_bm: pointer to the bitmap to initialize
468 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
472 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
474 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
475 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
476 ice_set_bit(bit, pos_result_bm);
480 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
481 * @hw: pointer to hardware structure
482 * @recps: struct that we need to populate
483 * @rid: recipe ID that we are populating
484 * @refresh_required: true if we should get recipe to profile mapping from FW
486 * This function is used to populate all the necessary entries into our
487 * bookkeeping so that we have a current list of all the recipes that are
488 * programmed in the firmware.
/* NOTE(review): this listing drops many interior lines (braces, allocation
 * checks, goto labels, error-unroll cleanup, loop closings — the original
 * numbering jumps throughout). Treat the body below as a fragment; do not
 * edit without the pristine source.
 */
490 static enum ice_status
491 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
492 bool *refresh_required)
494 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
495 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
496 struct ice_aqc_recipe_data_elem *tmp;
497 u16 num_recps = ICE_MAX_NUM_RECIPES;
498 struct ice_prot_lkup_ext *lkup_exts;
499 u16 i, sub_recps, fv_word_idx = 0;
500 enum ice_status status;
502 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
503 ice_init_possible_res_bm(possible_idx);
505 /* we need a buffer big enough to accommodate all the recipes */
506 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
507 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
/* NOTE(review): the '!tmp' guard preceding this return is missing here */
509 return ICE_ERR_NO_MEMORY;
511 tmp[0].recipe_indx = rid;
512 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
513 /* non-zero status meaning recipe doesn't exist */
517 /* Get recipe to profile map so that we can get the fv from lkups that
518 * we read for a recipe from FW. Since we want to minimize the number of
519 * times we make this FW call, just make one call and cache the copy
520 * until a new recipe is added. This operation is only required the
521 * first time to get the changes from FW. Then to search existing
522 * entries we don't need to update the cache again until another recipe
525 if (*refresh_required) {
526 ice_get_recp_to_prof_map(hw);
527 *refresh_required = false;
530 /* Start populating all the entries for recps[rid] based on lkups from
531 * firmware. Note that we are only creating the root recipe in our
534 lkup_exts = &recps[rid].lkup_exts;
536 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
537 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
538 struct ice_recp_grp_entry *rg_entry;
539 u8 prof_id, idx, prot = 0;
543 rg_entry = (struct ice_recp_grp_entry *)
544 ice_malloc(hw, sizeof(*rg_entry));
546 status = ICE_ERR_NO_MEMORY;
550 idx = root_bufs.recipe_indx;
551 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
553 /* Mark all result indices in this chain */
554 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
555 ice_set_bit(root_bufs.content.result_indx &
556 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
558 /* get the first profile that is associated with rid */
559 prof_id = ice_find_first_bit(recipe_to_profile[idx],
560 ICE_MAX_NUM_PROFILES);
561 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
562 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
564 rg_entry->fv_idx[i] = lkup_indx;
565 rg_entry->fv_mask[i] =
566 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
568 /* If the recipe is a chained recipe then all its
569 * child recipe's result will have a result index.
570 * To fill fv_words we should not use those result
571 * index, we only need the protocol ids and offsets.
572 * We will skip all the fv_idx which stores result
573 * index in them. We also need to skip any fv_idx which
574 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
575 * valid offset value.
577 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
578 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
579 rg_entry->fv_idx[i] == 0)
582 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
583 rg_entry->fv_idx[i], &prot, &off);
584 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
585 lkup_exts->fv_words[fv_word_idx].off = off;
588 /* populate rg_list with the data from the child entry of this
591 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
593 /* Propagate some data to the recipe database */
594 recps[idx].is_root = is_root;
595 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
596 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
597 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
598 recps[idx].chain_idx = root_bufs.content.result_indx &
599 ~ICE_AQ_RECIPE_RESULT_EN;
600 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
602 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
608 /* Only do the following for root recipes entries */
609 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
610 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
611 recps[idx].root_rid = root_bufs.content.rid &
612 ~ICE_AQ_RECIPE_ID_IS_ROOT;
613 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
616 /* Complete initialization of the root recipe entry */
617 lkup_exts->n_val_words = fv_word_idx;
618 recps[rid].big_recp = (num_recps > 1);
619 recps[rid].n_grp_count = num_recps;
620 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
621 ice_calloc(hw, recps[rid].n_grp_count,
622 sizeof(struct ice_aqc_recipe_data_elem));
623 if (!recps[rid].root_buf)
626 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
627 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
629 /* Copy result indexes */
630 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
631 ICE_NONDMA_TO_NONDMA);
632 recps[rid].recp_created = true;
640 * ice_get_recp_to_prof_map - updates recipe to profile mapping
641 * @hw: pointer to hardware structure
643 * This function is used to populate recipe_to_profile matrix where index to
644 * this array is the recipe ID and the element is the mapping of which profiles
645 * is this recipe mapped to.
648 ice_get_recp_to_prof_map(struct ice_hw *hw)
650 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
653 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
656 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
657 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
658 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
660 ice_memcpy(profile_to_recipe[i], r_bitmap,
661 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
662 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
663 if (ice_is_bit_set(r_bitmap, j))
664 ice_set_bit(i, recipe_to_profile[j]);
669 * ice_init_def_sw_recp - initialize the recipe book keeping tables
670 * @hw: pointer to the HW struct
672 * Allocate memory for the entire recipe table and initialize the structures/
673 * entries corresponding to basic recipes.
675 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
677 struct ice_sw_recipe *recps;
680 recps = (struct ice_sw_recipe *)
681 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
683 return ICE_ERR_NO_MEMORY;
685 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
686 recps[i].root_rid = i;
687 INIT_LIST_HEAD(&recps[i].filt_rules);
688 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
689 INIT_LIST_HEAD(&recps[i].rg_list);
690 ice_init_lock(&recps[i].filt_rule_lock);
693 hw->switch_info->recp_list = recps;
699 * ice_aq_get_sw_cfg - get switch configuration
700 * @hw: pointer to the hardware structure
701 * @buf: pointer to the result buffer
702 * @buf_size: length of the buffer available for response
703 * @req_desc: pointer to requested descriptor
704 * @num_elems: pointer to number of elements
705 * @cd: pointer to command details structure or NULL
707 * Get switch configuration (0x0200) to be placed in 'buff'.
708 * This admin command returns information such as initial VSI/port number
709 * and switch ID it belongs to.
711 * NOTE: *req_desc is both an input/output parameter.
712 * The caller of this function first calls this function with *request_desc set
713 * to 0. If the response from f/w has *req_desc set to 0, all the switch
714 * configuration information has been returned; if non-zero (meaning not all
715 * the information was returned), the caller should call this function again
716 * with *req_desc set to the previous value returned by f/w to get the
717 * next block of switch configuration information.
719 * *num_elems is output only parameter. This reflects the number of elements
720 * in response buffer. The caller of this function to use *num_elems while
721 * parsing the response buffer.
723 static enum ice_status
724 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
725 u16 buf_size, u16 *req_desc, u16 *num_elems,
726 struct ice_sq_cd *cd)
728 struct ice_aqc_get_sw_cfg *cmd;
729 enum ice_status status;
730 struct ice_aq_desc desc;
732 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
733 cmd = &desc.params.get_sw_conf;
734 cmd->element = CPU_TO_LE16(*req_desc);
736 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
738 *req_desc = LE16_TO_CPU(cmd->element);
739 *num_elems = LE16_TO_CPU(cmd->num_elems);
746 * ice_alloc_sw - allocate resources specific to switch
747 * @hw: pointer to the HW struct
748 * @ena_stats: true to turn on VEB stats
749 * @shared_res: true for shared resource, false for dedicated resource
750 * @sw_id: switch ID returned
751 * @counter_id: VEB counter ID returned
753 * allocates switch resources (SWID and VEB counter) (0x0208)
/* NOTE(review): fragment — this listing drops interior lines (allocation
 * checks, status guards, the ena_stats conditional and the exit label; the
 * original numbering jumps). Do not edit without the pristine source.
 */
756 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
759 struct ice_aqc_alloc_free_res_elem *sw_buf;
760 struct ice_aqc_res_elem *sw_ele;
761 enum ice_status status;
764 buf_len = sizeof(*sw_buf);
765 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
766 ice_malloc(hw, buf_len);
768 return ICE_ERR_NO_MEMORY;
770 /* Prepare buffer for switch ID.
771 * The number of resource entries in buffer is passed as 1 since only a
772 * single switch/VEB instance is allocated, and hence a single sw_id
775 sw_buf->num_elems = CPU_TO_LE16(1);
777 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
778 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
779 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
781 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
782 ice_aqc_opc_alloc_res, NULL);
785 goto ice_alloc_sw_exit;
787 sw_ele = &sw_buf->elem[0];
788 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
791 /* Prepare buffer for VEB Counter */
792 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
793 struct ice_aqc_alloc_free_res_elem *counter_buf;
794 struct ice_aqc_res_elem *counter_ele;
796 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
797 ice_malloc(hw, buf_len);
799 status = ICE_ERR_NO_MEMORY;
800 goto ice_alloc_sw_exit;
803 /* The number of resource entries in buffer is passed as 1 since
804 * only a single switch/VEB instance is allocated, and hence a
805 * single VEB counter is requested.
807 counter_buf->num_elems = CPU_TO_LE16(1);
808 counter_buf->res_type =
809 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
810 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
811 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
815 ice_free(hw, counter_buf);
816 goto ice_alloc_sw_exit;
818 counter_ele = &counter_buf->elem[0];
819 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
820 ice_free(hw, counter_buf);
824 ice_free(hw, sw_buf);
829 * ice_free_sw - free resources specific to switch
830 * @hw: pointer to the HW struct
831 * @sw_id: switch ID returned
832 * @counter_id: VEB counter ID returned
834 * free switch resources (SWID and VEB counter) (0x0209)
836 * NOTE: This function frees multiple resources. It continues
837 * releasing other resources even after it encounters error.
838 * The error code returned is the last error it encountered.
/* NOTE(review): fragment — this listing drops interior lines (allocation
 * checks, status guards, the final return; original numbering jumps).
 * Do not edit without the pristine source.
 */
840 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
842 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
843 enum ice_status status, ret_status;
846 buf_len = sizeof(*sw_buf);
847 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
848 ice_malloc(hw, buf_len);
850 return ICE_ERR_NO_MEMORY;
852 /* Prepare buffer to free for switch ID res.
853 * The number of resource entries in buffer is passed as 1 since only a
854 * single switch/VEB instance is freed, and hence a single sw_id
857 sw_buf->num_elems = CPU_TO_LE16(1);
858 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
859 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
861 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
862 ice_aqc_opc_free_res, NULL);
865 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
867 /* Prepare buffer to free for VEB Counter resource */
868 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
869 ice_malloc(hw, buf_len);
871 ice_free(hw, sw_buf);
872 return ICE_ERR_NO_MEMORY;
875 /* The number of resource entries in buffer is passed as 1 since only a
876 * single switch/VEB instance is freed, and hence a single VEB counter
879 counter_buf->num_elems = CPU_TO_LE16(1);
880 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
881 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
883 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
884 ice_aqc_opc_free_res, NULL);
886 ice_debug(hw, ICE_DBG_SW,
887 "VEB counter resource could not be freed\n");
891 ice_free(hw, counter_buf);
892 ice_free(hw, sw_buf);
898 * @hw: pointer to the HW struct
899 * @vsi_ctx: pointer to a VSI context struct
900 * @cd: pointer to command details structure or NULL
902 * Add a VSI context to the hardware (0x0210)
905 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
906 struct ice_sq_cd *cd)
908 struct ice_aqc_add_update_free_vsi_resp *res;
909 struct ice_aqc_add_get_update_free_vsi *cmd;
910 struct ice_aq_desc desc;
911 enum ice_status status;
913 cmd = &desc.params.vsi_cmd;
914 res = &desc.params.add_update_free_vsi_res;
916 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
918 if (!vsi_ctx->alloc_from_pool)
919 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
920 ICE_AQ_VSI_IS_VALID);
922 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
924 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
926 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
927 sizeof(vsi_ctx->info), cd);
930 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
931 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
932 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
940 * @hw: pointer to the HW struct
941 * @vsi_ctx: pointer to a VSI context struct
942 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
943 * @cd: pointer to command details structure or NULL
945 * Free VSI context info from hardware (0x0213)
948 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
949 bool keep_vsi_alloc, struct ice_sq_cd *cd)
951 struct ice_aqc_add_update_free_vsi_resp *resp;
952 struct ice_aqc_add_get_update_free_vsi *cmd;
953 struct ice_aq_desc desc;
954 enum ice_status status;
956 cmd = &desc.params.vsi_cmd;
957 resp = &desc.params.add_update_free_vsi_res;
959 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
961 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
963 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
965 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
967 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
968 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
976 * @hw: pointer to the HW struct
977 * @vsi_ctx: pointer to a VSI context struct
978 * @cd: pointer to command details structure or NULL
980 * Update VSI context in the hardware (0x0211)
983 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
984 struct ice_sq_cd *cd)
986 struct ice_aqc_add_update_free_vsi_resp *resp;
987 struct ice_aqc_add_get_update_free_vsi *cmd;
988 struct ice_aq_desc desc;
989 enum ice_status status;
991 cmd = &desc.params.vsi_cmd;
992 resp = &desc.params.add_update_free_vsi_res;
994 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
996 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
998 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1000 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1001 sizeof(vsi_ctx->info), cd);
1004 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1005 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1012 * ice_is_vsi_valid - check whether the VSI is valid or not
1013 * @hw: pointer to the HW struct
1014 * @vsi_handle: VSI handle
1016 * check whether the VSI is valid or not
1018 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1020 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1024 * ice_get_hw_vsi_num - return the HW VSI number
1025 * @hw: pointer to the HW struct
1026 * @vsi_handle: VSI handle
1028 * return the HW VSI number
1029 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1031 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1033 return hw->vsi_ctx[vsi_handle]->vsi_num;
1037 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1038 * @hw: pointer to the HW struct
1039 * @vsi_handle: VSI handle
1041 * return the VSI context entry for a given VSI handle
1043 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1045 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1049 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1050 * @hw: pointer to the HW struct
1051 * @vsi_handle: VSI handle
1052 * @vsi: VSI context pointer
1054 * save the VSI context entry for a given VSI handle
1057 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1059 hw->vsi_ctx[vsi_handle] = vsi;
1063 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1064 * @hw: pointer to the HW struct
1065 * @vsi_handle: VSI handle
1067 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1069 struct ice_vsi_ctx *vsi;
1072 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1075 ice_for_each_traffic_class(i) {
1076 if (vsi->lan_q_ctx[i]) {
1077 ice_free(hw, vsi->lan_q_ctx[i]);
1078 vsi->lan_q_ctx[i] = NULL;
1084 * ice_clear_vsi_ctx - clear the VSI context entry
1085 * @hw: pointer to the HW struct
1086 * @vsi_handle: VSI handle
1088 * clear the VSI context entry
1090 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1092 struct ice_vsi_ctx *vsi;
1094 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1096 ice_clear_vsi_q_ctx(hw, vsi_handle);
1098 hw->vsi_ctx[vsi_handle] = NULL;
1103 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1104 * @hw: pointer to the HW struct
1106 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1110 for (i = 0; i < ICE_MAX_VSI; i++)
1111 ice_clear_vsi_ctx(hw, i);
1115 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1116 * @hw: pointer to the HW struct
1117 * @vsi_handle: unique VSI handle provided by drivers
1118 * @vsi_ctx: pointer to a VSI context struct
1119 * @cd: pointer to command details structure or NULL
1121 * Add a VSI context to the hardware also add it into the VSI handle list.
1122 * If this function gets called after reset for existing VSIs then update
1123 * with the new HW VSI number in the corresponding VSI handle list entry.
1126 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1127 struct ice_sq_cd *cd)
1129 struct ice_vsi_ctx *tmp_vsi_ctx;
1130 enum ice_status status;
1132 if (vsi_handle >= ICE_MAX_VSI)
1133 return ICE_ERR_PARAM;
1134 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1137 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1139 /* Create a new VSI context */
1140 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1141 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1143 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1144 return ICE_ERR_NO_MEMORY;
1146 *tmp_vsi_ctx = *vsi_ctx;
1148 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1150 /* update with new HW VSI num */
1151 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1158 * ice_free_vsi- free VSI context from hardware and VSI handle list
1159 * @hw: pointer to the HW struct
1160 * @vsi_handle: unique VSI handle
1161 * @vsi_ctx: pointer to a VSI context struct
1162 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1163 * @cd: pointer to command details structure or NULL
1165 * Free VSI context info from hardware as well as from VSI handle list
1168 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1169 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1171 enum ice_status status;
1173 if (!ice_is_vsi_valid(hw, vsi_handle))
1174 return ICE_ERR_PARAM;
1175 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1176 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1178 ice_clear_vsi_ctx(hw, vsi_handle);
1184 * @hw: pointer to the HW struct
1185 * @vsi_handle: unique VSI handle
1186 * @vsi_ctx: pointer to a VSI context struct
1187 * @cd: pointer to command details structure or NULL
1189 * Update VSI context in the hardware
/* ice_update_vsi - update a VSI context in HW.
 * Validates vsi_handle, fills vsi_ctx->vsi_num from the handle map, and
 * forwards the context to the update-VSI AQ command.
 * NOTE(review): listing has elided lines (braces); kept byte-identical.
 */
1192 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1193 struct ice_sq_cd *cd)
1195 if (!ice_is_vsi_valid(hw, vsi_handle))
1196 return ICE_ERR_PARAM;
1197 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1198 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1202 * ice_aq_get_vsi_params
1203 * @hw: pointer to the HW struct
1204 * @vsi_ctx: pointer to a VSI context struct
1205 * @cd: pointer to command details structure or NULL
1207 * Get VSI context info from hardware (0x0212)
/* ice_aq_get_vsi_params - get VSI context info from HW (AQ opcode 0x0212).
 * Sends the command with vsi_ctx->info as the response buffer, then copies
 * the VSI number and allocation counters back from the descriptor response.
 * NOTE(review): listing has elided lines — e.g. the mask operand on the
 * vsi_num assignment below is cut off; code kept byte-identical.
 */
1210 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1211 struct ice_sq_cd *cd)
1213 struct ice_aqc_add_get_update_free_vsi *cmd;
1214 struct ice_aqc_get_vsi_resp *resp;
1215 struct ice_aq_desc desc;
1216 enum ice_status status;
1218 cmd = &desc.params.vsi_cmd;
1219 resp = &desc.params.get_vsi_resp;
1221 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1223 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1225 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1226 sizeof(vsi_ctx->info), cd);
1228 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1230 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1231 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1238 * ice_aq_add_update_mir_rule - add/update a mirror rule
1239 * @hw: pointer to the HW struct
1240 * @rule_type: Rule Type
1241 * @dest_vsi: VSI number to which packets will be mirrored
1242 * @count: length of the list
1243 * @mr_buf: buffer for list of mirrored VSI numbers
1244 * @cd: pointer to command details structure or NULL
1247 * Add/Update Mirror Rule (0x260).
/* ice_aq_add_update_mir_rule - add/update a mirror rule (AQ opcode 0x260).
 * For virtual-port rule types a list of mirrored VSI numbers is built from
 * mr_buf (count entries); physical-port rule types must pass no list.
 * *rule_id is both input (existing rule to update, or
 * ICE_INVAL_MIRROR_RULE_ID to add) and output (rule ID from firmware).
 * NOTE(review): listing has elided lines (allocation checks, the 'add'
 * branch of the per-entry assignment, braces); kept byte-identical.
 */
1250 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1251 u16 count, struct ice_mir_rule_buf *mr_buf,
1252 struct ice_sq_cd *cd, u16 *rule_id)
1254 struct ice_aqc_add_update_mir_rule *cmd;
1255 struct ice_aq_desc desc;
1256 enum ice_status status;
1257 __le16 *mr_list = NULL;
1260 switch (rule_type) {
1261 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1262 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1263 /* Make sure count and mr_buf are set for these rule_types */
1264 if (!(count && mr_buf))
1265 return ICE_ERR_PARAM;
1267 buf_size = count * sizeof(__le16);
1268 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1270 return ICE_ERR_NO_MEMORY;
1272 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1273 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1274 /* Make sure count and mr_buf are not set for these
1277 if (count || mr_buf)
1278 return ICE_ERR_PARAM;
1281 ice_debug(hw, ICE_DBG_SW,
1282 "Error due to unsupported rule_type %u\n", rule_type);
1283 return ICE_ERR_OUT_OF_RANGE;
1286 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1288 /* Pre-process 'mr_buf' items for add/update of virtual port
1289 * ingress/egress mirroring (but not physical port ingress/egress
1295 for (i = 0; i < count; i++) {
1298 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1300 /* Validate specified VSI number, make sure it is less
1301 * than ICE_MAX_VSI, if not return with error.
1303 if (id >= ICE_MAX_VSI) {
1304 ice_debug(hw, ICE_DBG_SW,
1305 "Error VSI index (%u) out-of-range\n",
1307 ice_free(hw, mr_list);
1308 return ICE_ERR_OUT_OF_RANGE;
1311 /* add VSI to mirror rule */
1314 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1315 else /* remove VSI from mirror rule */
1316 mr_list[i] = CPU_TO_LE16(id);
1320 cmd = &desc.params.add_update_rule;
1321 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1322 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1323 ICE_AQC_RULE_ID_VALID_M);
1324 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1325 cmd->num_entries = CPU_TO_LE16(count);
1326 cmd->dest = CPU_TO_LE16(dest_vsi);
1328 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1330 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1332 ice_free(hw, mr_list);
1338 * ice_aq_delete_mir_rule - delete a mirror rule
1339 * @hw: pointer to the HW struct
1340 * @rule_id: Mirror rule ID (to be deleted)
1341 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1342 * otherwise it is returned to the shared pool
1343 * @cd: pointer to command details structure or NULL
1345 * Delete Mirror Rule (0x261).
/* ice_aq_delete_mir_rule - delete a mirror rule (AQ opcode 0x261).
 * Rejects rule IDs outside 0..ICE_MAX_NUM_MIRROR_RULES-1, marks the ID
 * valid, and sets the keep-allocated flag (the guarding 'if (keep_allocd)'
 * line appears elided from this listing — TODO confirm against upstream).
 * NOTE(review): code kept byte-identical to the listing.
 */
1348 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1349 struct ice_sq_cd *cd)
1351 struct ice_aqc_delete_mir_rule *cmd;
1352 struct ice_aq_desc desc;
1354 /* rule_id should be in the range 0...63 */
1355 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1356 return ICE_ERR_OUT_OF_RANGE;
1358 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1360 cmd = &desc.params.del_rule;
1361 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1362 cmd->rule_id = CPU_TO_LE16(rule_id);
1365 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1367 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1371 * ice_aq_alloc_free_vsi_list
1372 * @hw: pointer to the HW struct
1373 * @vsi_list_id: VSI list ID returned or used for lookup
1374 * @lkup_type: switch rule filter lookup type
1375 * @opc: switch rules population command type - pass in the command opcode
1377 * allocates or free a VSI list resource
/* ice_aq_alloc_free_vsi_list - allocate or free one VSI list resource.
 * Picks the resource type from lkup_type (replication list for MAC/
 * ethertype/promisc lookups, prune list for VLAN), then issues an
 * alloc/free resource AQ call. On alloc, *vsi_list_id receives the new
 * list ID; on free, *vsi_list_id names the list to release.
 * NOTE(review): listing has elided lines (alloc-failure check, braces);
 * kept byte-identical.
 */
1379 static enum ice_status
1380 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1381 enum ice_sw_lkup_type lkup_type,
1382 enum ice_adminq_opc opc)
1384 struct ice_aqc_alloc_free_res_elem *sw_buf;
1385 struct ice_aqc_res_elem *vsi_ele;
1386 enum ice_status status;
1389 buf_len = sizeof(*sw_buf);
1390 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1391 ice_malloc(hw, buf_len);
1393 return ICE_ERR_NO_MEMORY;
1394 sw_buf->num_elems = CPU_TO_LE16(1);
1396 if (lkup_type == ICE_SW_LKUP_MAC ||
1397 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1398 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1399 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1400 lkup_type == ICE_SW_LKUP_PROMISC ||
1401 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1402 lkup_type == ICE_SW_LKUP_LAST) {
1403 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1404 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1406 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1408 status = ICE_ERR_PARAM;
1409 goto ice_aq_alloc_free_vsi_list_exit;
1412 if (opc == ice_aqc_opc_free_res)
1413 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1415 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1417 goto ice_aq_alloc_free_vsi_list_exit;
1419 if (opc == ice_aqc_opc_alloc_res) {
1420 vsi_ele = &sw_buf->elem[0];
1421 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1424 ice_aq_alloc_free_vsi_list_exit:
1425 ice_free(hw, sw_buf);
1430 * ice_aq_set_storm_ctrl - Sets storm control configuration
1431 * @hw: pointer to the HW struct
1432 * @bcast_thresh: represents the upper threshold for broadcast storm control
1433 * @mcast_thresh: represents the upper threshold for multicast storm control
1434 * @ctl_bitmask: storm control control knobs
1436 * Sets the storm control configuration (0x0280)
/* ice_aq_set_storm_ctrl - set storm control configuration (AQ 0x0280).
 * Masks both thresholds with ICE_AQ_THRESHOLD_M and writes the control
 * bitmask verbatim before sending the command (no buffer, no cmd details).
 * NOTE(review): listing has elided lines (rest of the signature with the
 * ctl_bitmask parameter, braces); code kept byte-identical.
 */
1439 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1442 struct ice_aqc_storm_cfg *cmd;
1443 struct ice_aq_desc desc;
1445 cmd = &desc.params.storm_conf;
1447 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1449 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1450 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1451 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1453 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1457 * ice_aq_get_storm_ctrl - gets storm control configuration
1458 * @hw: pointer to the HW struct
1459 * @bcast_thresh: represents the upper threshold for broadcast storm control
1460 * @mcast_thresh: represents the upper threshold for multicast storm control
1461 * @ctl_bitmask: storm control control knobs
1463 * Gets the storm control configuration (0x0281)
/* ice_aq_get_storm_ctrl - read storm control configuration (AQ 0x0281).
 * On success, copies thresholds (masked — the mask operands are elided in
 * this listing) and the control bitmask out through the pointer arguments.
 * NOTE(review): listing has elided lines (rest of signature, null-pointer
 * guards, braces); code kept byte-identical.
 */
1466 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1469 enum ice_status status;
1470 struct ice_aq_desc desc;
1472 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1474 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1476 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1479 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1482 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1485 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1492 * ice_aq_sw_rules - add/update/remove switch rules
1493 * @hw: pointer to the HW struct
1494 * @rule_list: pointer to switch rule population list
1495 * @rule_list_sz: total size of the rule list in bytes
1496 * @num_rules: number of switch rules in the rule_list
1497 * @opc: switch rules population command type - pass in the command opcode
1498 * @cd: pointer to command details structure or NULL
1500 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
/* ice_aq_sw_rules - add/update/remove switch rules
 * (AQ 0x02a0 / 0x02a1 / 0x02a2). Rejects any other opcode, marks the
 * descriptor as carrying a read buffer (ICE_AQ_FLAG_RD), records the rule
 * count, and sends rule_list (rule_list_sz bytes) as the command buffer.
 * NOTE(review): listing has elided lines (braces); code kept byte-identical.
 */
1502 static enum ice_status
1503 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1504 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1506 struct ice_aq_desc desc;
1508 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1510 if (opc != ice_aqc_opc_add_sw_rules &&
1511 opc != ice_aqc_opc_update_sw_rules &&
1512 opc != ice_aqc_opc_remove_sw_rules)
1513 return ICE_ERR_PARAM;
1515 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1517 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1518 desc.params.sw_rules.num_rules_fltr_entry_index =
1519 CPU_TO_LE16(num_rules);
1520 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1524 * ice_aq_add_recipe - add switch recipe
1525 * @hw: pointer to the HW struct
1526 * @s_recipe_list: pointer to switch rule population list
1527 * @num_recipes: number of switch recipes in the list
1528 * @cd: pointer to command details structure or NULL
/* ice_aq_add_recipe - add switch recipe(s) via AQ.
 * Sends num_recipes entries of s_recipe_list as a read buffer to the
 * add-recipe command; buf_size is derived from the element size.
 * NOTE(review): listing has elided lines (braces, buf_size declaration);
 * code kept byte-identical.
 */
1533 ice_aq_add_recipe(struct ice_hw *hw,
1534 struct ice_aqc_recipe_data_elem *s_recipe_list,
1535 u16 num_recipes, struct ice_sq_cd *cd)
1537 struct ice_aqc_add_get_recipe *cmd;
1538 struct ice_aq_desc desc;
1541 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1542 cmd = &desc.params.add_get_recipe;
1543 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1545 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1546 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1548 buf_size = num_recipes * sizeof(*s_recipe_list);
1550 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1554 * ice_aq_get_recipe - get switch recipe
1555 * @hw: pointer to the HW struct
1556 * @s_recipe_list: pointer to switch rule population list
1557 * @num_recipes: pointer to the number of recipes (input and output)
1558 * @recipe_root: root recipe number of recipe(s) to retrieve
1559 * @cd: pointer to command details structure or NULL
1563 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1564 * On output, *num_recipes will equal the number of entries returned in
1567 * The caller must supply enough space in s_recipe_list to hold all possible
1568 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
/* ice_aq_get_recipe - retrieve recipe(s) rooted at recipe_root via AQ.
 * Requires *num_recipes == ICE_MAX_NUM_RECIPES on entry (caller supplies a
 * maximally sized buffer); on return *num_recipes holds the count firmware
 * actually wrote into s_recipe_list.
 * NOTE(review): listing has elided lines (braces, buf_size declaration,
 * final return); code kept byte-identical.
 */
1571 ice_aq_get_recipe(struct ice_hw *hw,
1572 struct ice_aqc_recipe_data_elem *s_recipe_list,
1573 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1575 struct ice_aqc_add_get_recipe *cmd;
1576 struct ice_aq_desc desc;
1577 enum ice_status status;
1580 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1581 return ICE_ERR_PARAM;
1583 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1584 cmd = &desc.params.add_get_recipe;
1585 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1587 cmd->return_index = CPU_TO_LE16(recipe_root);
1588 cmd->num_sub_recipes = 0;
1590 buf_size = *num_recipes * sizeof(*s_recipe_list);
1592 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1593 /* cppcheck-suppress constArgument */
1594 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1600 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1601 * @hw: pointer to the HW struct
1602 * @profile_id: package profile ID to associate the recipe with
1603 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1604 * @cd: pointer to command details structure or NULL
1605 * Recipe to profile association (0x0291)
/* ice_aq_map_recipe_to_profile - associate recipes with a package profile
 * (AQ opcode 0x0291). Copies the caller's recipe bitmap into the command
 * descriptor so the device knows which recipes map to profile_id.
 * NOTE(review): listing has elided lines (braces); code kept byte-identical.
 */
1608 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1609 struct ice_sq_cd *cd)
1611 struct ice_aqc_recipe_to_profile *cmd;
1612 struct ice_aq_desc desc;
1614 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1615 cmd = &desc.params.recipe_to_profile;
1616 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1617 cmd->profile_id = CPU_TO_LE16(profile_id);
1618 /* Set the recipe ID bit in the bitmask to let the device know which
1619 * profile we are associating the recipe to
1621 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1622 ICE_NONDMA_TO_NONDMA);
1624 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1628 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1629 * @hw: pointer to the HW struct
1630 * @profile_id: package profile ID to associate the recipe with
1631 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1632 * @cd: pointer to command details structure or NULL
1633 * Associate profile ID with given recipe (0x0293)
/* ice_aq_get_recipe_to_profile - read the recipe bitmap currently
 * associated with a package profile (AQ opcode 0x0293). On the visible
 * success path the descriptor's recipe_assoc field is copied back into
 * r_bitmap.
 * NOTE(review): listing has elided lines (braces, status check, return);
 * code kept byte-identical.
 */
1636 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1637 struct ice_sq_cd *cd)
1639 struct ice_aqc_recipe_to_profile *cmd;
1640 struct ice_aq_desc desc;
1641 enum ice_status status;
1643 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1644 cmd = &desc.params.recipe_to_profile;
1645 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1646 cmd->profile_id = CPU_TO_LE16(profile_id);
1648 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1650 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1651 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1657 * ice_alloc_recipe - add recipe resource
1658 * @hw: pointer to the hardware structure
1659 * @rid: recipe ID returned as response to AQ call
/* ice_alloc_recipe - allocate one shared recipe resource.
 * Builds a single-element alloc request with type RECIPE | FLAG_SHARED,
 * calls the alloc/free-resource helper, stores the returned recipe ID in
 * *rid on the visible success path, and frees the request buffer.
 * NOTE(review): listing has elided lines (alloc-failure check, braces,
 * status-guard before *rid); code kept byte-identical.
 */
1661 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1663 struct ice_aqc_alloc_free_res_elem *sw_buf;
1664 enum ice_status status;
1667 buf_len = sizeof(*sw_buf);
1668 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1670 return ICE_ERR_NO_MEMORY;
1672 sw_buf->num_elems = CPU_TO_LE16(1);
1673 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1674 ICE_AQC_RES_TYPE_S) |
1675 ICE_AQC_RES_TYPE_FLAG_SHARED);
1676 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1677 ice_aqc_opc_alloc_res, NULL);
1679 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1680 ice_free(hw, sw_buf);
1685 /* ice_init_port_info - Initialize port_info with switch configuration data
1686 * @pi: pointer to port_info
1687 * @vsi_port_num: VSI number or port number
1688 * @type: Type of switch element (port or VSI)
1689 * @swid: switch ID of the switch the element is attached to
1690 * @pf_vf_num: PF or VF number
1691 * @is_vf: true if the element is a VF, false otherwise
/* ice_init_port_info - populate a port_info from switch-config data.
 * For a physical port, records the logical port number, PF/VF number, and
 * invalidates both default Tx/Rx VSI numbers; any other element type logs
 * a debug message. The enclosing switch statement and several assignments
 * (swid, is_vf) appear elided from this listing.
 * NOTE(review): code kept byte-identical.
 */
1694 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1695 u16 swid, u16 pf_vf_num, bool is_vf)
1698 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1699 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1701 pi->pf_vf_num = pf_vf_num;
1703 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1704 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1707 ice_debug(pi->hw, ICE_DBG_SW,
1708 "incorrect VSI/port type received\n");
1713 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1714 * @hw: pointer to the hardware structure
/* ice_get_initial_sw_cfg - fetch initial port/VSI switch configuration.
 * Repeatedly calls ice_aq_get_sw_cfg (firmware signals continuation via a
 * non-zero req_desc) and, for each returned element, decodes the VSI/port
 * number, PF/VF number, swid, and element type, initializing hw->port_info
 * for physical/virtual ports up to num_total_ports (here fixed at 1).
 * The response buffer is freed before returning.
 * NOTE(review): listing has elided lines (loop body braces, is_vf handling,
 * remaining ice_init_port_info arguments); code kept byte-identical.
 */
1716 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1718 struct ice_aqc_get_sw_cfg_resp *rbuf;
1719 enum ice_status status;
1720 u16 num_total_ports;
1726 num_total_ports = 1;
1728 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1729 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1732 return ICE_ERR_NO_MEMORY;
1734 /* Multiple calls to ice_aq_get_sw_cfg may be required
1735 * to get all the switch configuration information. The need
1736 * for additional calls is indicated by ice_aq_get_sw_cfg
1737 * writing a non-zero value in req_desc
1740 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1741 &req_desc, &num_elems, NULL);
1746 for (i = 0; i < num_elems; i++) {
1747 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1748 u16 pf_vf_num, swid, vsi_port_num;
1752 ele = rbuf[i].elements;
1753 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1754 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1756 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1757 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1759 swid = LE16_TO_CPU(ele->swid);
1761 if (LE16_TO_CPU(ele->pf_vf_num) &
1762 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1765 type = LE16_TO_CPU(ele->vsi_port_num) >>
1766 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1769 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1770 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1771 if (j == num_total_ports) {
1772 ice_debug(hw, ICE_DBG_SW,
1773 "more ports than expected\n");
1774 status = ICE_ERR_CFG;
1777 ice_init_port_info(hw->port_info,
1778 vsi_port_num, type, swid,
1786 } while (req_desc && !status);
1789 ice_free(hw, (void *)rbuf);
1794 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1795 * @hw: pointer to the hardware structure
1796 * @fi: filter info structure to fill/update
1798 * This helper function populates the lb_en and lan_en elements of the provided
1799 * ice_fltr_info struct using the switch's type and characteristics of the
1800 * switch rule being configured.
/* ice_fill_sw_info - populate lb_en/lan_en for a filter rule.
 * For Tx-flag forwarding actions (to VSI/VSI-list/queue/queue-group),
 * loopback is enabled for non-VLAN lookups (prune + LB would replicate
 * and drop packets), and LAN enable is decided from the lookup type and,
 * for MAC/MAC_VLAN, whether the address is multicast/broadcast.
 * NOTE(review): the actual lb_en/lan_en assignments and the VEB/VEPA
 * switch-type branches are elided from this listing; the conditions shown
 * below are only part of the full decision. Code kept byte-identical.
 */
1802 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1806 if ((fi->flag & ICE_FLTR_TX) &&
1807 (fi->fltr_act == ICE_FWD_TO_VSI ||
1808 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1809 fi->fltr_act == ICE_FWD_TO_Q ||
1810 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1811 /* Setting LB for prune actions will result in replicated
1812 * packets to the internal switch that will be dropped.
1814 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1817 /* Set lan_en to TRUE if
1818 * 1. The switch is a VEB AND
1820 * 2.1 The lookup is a directional lookup like ethertype,
1821 * promiscuous, ethertype-MAC, promiscuous-VLAN
1822 * and default-port OR
1823 * 2.2 The lookup is VLAN, OR
1824 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1825 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1829 * The switch is a VEPA.
1831 * In all other cases, the LAN enable has to be set to false.
1834 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1835 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1836 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1837 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1838 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1839 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1840 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1841 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1842 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1843 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1852 * ice_fill_sw_rule - Helper function to fill switch rule structure
1853 * @hw: pointer to the hardware structure
1854 * @f_info: entry containing packet forwarding information
1855 * @s_rule: switch rule structure to be filled in based on mac_entry
1856 * @opc: switch rules population command type - pass in the command opcode
/* ice_fill_sw_rule - fill a switch-rule AQ element from filter info.
 * For a remove opcode, only the rule index is filled. Otherwise the dummy
 * Ethernet header is copied into the rule buffer and patched per lookup
 * type (DA MAC, ethertype at offset 12, VLAN TCI at offset 14), and the
 * action word is built from the forwarding action (VSI, VSI list, queue,
 * queue group, drop) plus lb_en/lan_en from ice_fill_sw_info.
 * NOTE(review): listing has elided lines (several local declarations,
 * 'break' statements, the daddr NULL-check before the memcpy); code kept
 * byte-identical.
 */
1859 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1860 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1862 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1870 if (opc == ice_aqc_opc_remove_sw_rules) {
1871 s_rule->pdata.lkup_tx_rx.act = 0;
1872 s_rule->pdata.lkup_tx_rx.index =
1873 CPU_TO_LE16(f_info->fltr_rule_id);
1874 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1878 eth_hdr_sz = sizeof(dummy_eth_header);
1879 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1881 /* initialize the ether header with a dummy header */
1882 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1883 ice_fill_sw_info(hw, f_info);
1885 switch (f_info->fltr_act) {
1886 case ICE_FWD_TO_VSI:
1887 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1888 ICE_SINGLE_ACT_VSI_ID_M;
1889 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1890 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1891 ICE_SINGLE_ACT_VALID_BIT;
1893 case ICE_FWD_TO_VSI_LIST:
1894 act |= ICE_SINGLE_ACT_VSI_LIST;
1895 act |= (f_info->fwd_id.vsi_list_id <<
1896 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1897 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1898 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1899 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1900 ICE_SINGLE_ACT_VALID_BIT;
1903 act |= ICE_SINGLE_ACT_TO_Q;
1904 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1905 ICE_SINGLE_ACT_Q_INDEX_M;
1907 case ICE_DROP_PACKET:
1908 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1909 ICE_SINGLE_ACT_VALID_BIT;
1911 case ICE_FWD_TO_QGRP:
1912 q_rgn = f_info->qgrp_size > 0 ?
1913 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1914 act |= ICE_SINGLE_ACT_TO_Q;
1915 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1916 ICE_SINGLE_ACT_Q_INDEX_M;
1917 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1918 ICE_SINGLE_ACT_Q_REGION_M;
1925 act |= ICE_SINGLE_ACT_LB_ENABLE;
1927 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1929 switch (f_info->lkup_type) {
1930 case ICE_SW_LKUP_MAC:
1931 daddr = f_info->l_data.mac.mac_addr;
1933 case ICE_SW_LKUP_VLAN:
1934 vlan_id = f_info->l_data.vlan.vlan_id;
1935 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1936 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1937 act |= ICE_SINGLE_ACT_PRUNE;
1938 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1941 case ICE_SW_LKUP_ETHERTYPE_MAC:
1942 daddr = f_info->l_data.ethertype_mac.mac_addr;
1944 case ICE_SW_LKUP_ETHERTYPE:
1945 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1946 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1948 case ICE_SW_LKUP_MAC_VLAN:
1949 daddr = f_info->l_data.mac_vlan.mac_addr;
1950 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1952 case ICE_SW_LKUP_PROMISC_VLAN:
1953 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1955 case ICE_SW_LKUP_PROMISC:
1956 daddr = f_info->l_data.mac_vlan.mac_addr;
1962 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1963 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1964 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1966 /* Recipe set depending on lookup type */
1967 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1968 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1969 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1972 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1973 ICE_NONDMA_TO_NONDMA);
1975 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1976 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1977 *off = CPU_TO_BE16(vlan_id);
1980 /* Create the switch rule with the final dummy Ethernet header */
1981 if (opc != ice_aqc_opc_update_sw_rules)
1982 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1986 * ice_add_marker_act
1987 * @hw: pointer to the hardware structure
1988 * @m_ent: the management entry for which sw marker needs to be added
1989 * @sw_marker: sw marker to tag the Rx descriptor with
1990 * @l_id: large action resource ID
1992 * Create a large action to hold software marker and update the switch rule
1993 * entry pointed by m_ent with newly created large action
/* ice_add_marker_act - attach a software-marker large action to a rule.
 * Only valid for MAC lookups. Allocates one buffer holding two rules: a
 * 3-entry large action (forward to VSI or VSI list, profile-ID generic
 * value, software-marker generic value) followed by the refilled lookup
 * rule whose action is repointed at the large action via ICE_SINGLE_ACT_PTR.
 * Both are submitted as an update; on the visible success path the entry's
 * lg_act_idx and sw_marker_id are recorded. The buffer is freed on exit.
 * NOTE(review): listing has elided lines (alloc check, braces, status
 * guard, return); code kept byte-identical.
 */
1995 static enum ice_status
1996 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1997 u16 sw_marker, u16 l_id)
1999 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2000 /* For software marker we need 3 large actions
2001 * 1. FWD action: FWD TO VSI or VSI LIST
2002 * 2. GENERIC VALUE action to hold the profile ID
2003 * 3. GENERIC VALUE action to hold the software marker ID
2005 const u16 num_lg_acts = 3;
2006 enum ice_status status;
2012 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2013 return ICE_ERR_PARAM;
2015 /* Create two back-to-back switch rules and submit them to the HW using
2016 * one memory buffer:
2020 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2021 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2022 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2024 return ICE_ERR_NO_MEMORY;
2026 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2028 /* Fill in the first switch rule i.e. large action */
2029 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2030 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2031 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2033 /* First action VSI forwarding or VSI list forwarding depending on how
2036 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2037 m_ent->fltr_info.fwd_id.hw_vsi_id;
2039 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2040 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2041 ICE_LG_ACT_VSI_LIST_ID_M;
2042 if (m_ent->vsi_count > 1)
2043 act |= ICE_LG_ACT_VSI_LIST;
2044 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2046 /* Second action descriptor type */
2047 act = ICE_LG_ACT_GENERIC;
2049 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2050 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2052 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2053 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2055 /* Third action Marker value */
2056 act |= ICE_LG_ACT_GENERIC;
2057 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2058 ICE_LG_ACT_GENERIC_VALUE_M;
2060 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2062 /* call the fill switch rule to fill the lookup Tx Rx structure */
2063 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2064 ice_aqc_opc_update_sw_rules);
2066 /* Update the action to point to the large action ID */
2067 rx_tx->pdata.lkup_tx_rx.act =
2068 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2069 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2070 ICE_SINGLE_ACT_PTR_VAL_M));
2072 /* Use the filter rule ID of the previously created rule with single
2073 * act. Once the update happens, hardware will treat this as large
2076 rx_tx->pdata.lkup_tx_rx.index =
2077 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2079 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2080 ice_aqc_opc_update_sw_rules, NULL);
2082 m_ent->lg_act_idx = l_id;
2083 m_ent->sw_marker_id = sw_marker;
2086 ice_free(hw, lg_act);
2091 * ice_add_counter_act - add/update filter rule with counter action
2092 * @hw: pointer to the hardware structure
2093 * @m_ent: the management entry for which counter needs to be added
2094 * @counter_id: VLAN counter ID returned as part of allocate resource
2095 * @l_id: large action resource ID
/* ice_add_counter_act - attach a counter large action to a filter rule.
 * Mirrors ice_add_marker_act but with a 2-entry large action: forward to
 * VSI or VSI list, then a STAT_COUNT action carrying counter_id. The
 * existing lookup rule is refilled and repointed at the large action, both
 * rules are submitted as one update, and on the visible success path the
 * entry's lg_act_idx and counter_index are recorded. Buffer freed on exit.
 * NOTE(review): listing has elided lines (alloc check, braces, status
 * guard, return); code kept byte-identical.
 */
2098 static enum ice_status
2099 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2100 u16 counter_id, u16 l_id)
2101 struct ice_aqc_sw_rules_elem *lg_act;
2102 struct ice_aqc_sw_rules_elem *rx_tx;
2103 enum ice_status status;
2104 /* 2 actions will be added while adding a large action counter */
2105 const int num_acts = 2;
2112 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2113 return ICE_ERR_PARAM;
2115 /* Create two back-to-back switch rules and submit them to the HW using
2116 * one memory buffer:
2120 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2121 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2122 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2125 return ICE_ERR_NO_MEMORY;
2127 rx_tx = (struct ice_aqc_sw_rules_elem *)
2128 ((u8 *)lg_act + lg_act_size);
2130 /* Fill in the first switch rule i.e. large action */
2131 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2132 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2133 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2135 /* First action VSI forwarding or VSI list forwarding depending on how
2138 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2139 m_ent->fltr_info.fwd_id.hw_vsi_id;
2141 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2142 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2143 ICE_LG_ACT_VSI_LIST_ID_M;
2144 if (m_ent->vsi_count > 1)
2145 act |= ICE_LG_ACT_VSI_LIST;
2146 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2148 /* Second action counter ID */
2149 act = ICE_LG_ACT_STAT_COUNT;
2150 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2151 ICE_LG_ACT_STAT_COUNT_M;
2152 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2154 /* call the fill switch rule to fill the lookup Tx Rx structure */
2155 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2156 ice_aqc_opc_update_sw_rules);
2158 act = ICE_SINGLE_ACT_PTR;
2159 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2160 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2162 /* Use the filter rule ID of the previously created rule with single
2163 * act. Once the update happens, hardware will treat this as large
2166 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2167 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2169 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2170 ice_aqc_opc_update_sw_rules, NULL);
2172 m_ent->lg_act_idx = l_id;
2173 m_ent->counter_index = counter_id;
2176 ice_free(hw, lg_act);
2181 * ice_create_vsi_list_map
2182 * @hw: pointer to the hardware structure
2183 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2184 * @num_vsi: number of VSI handles in the array
2185 * @vsi_list_id: VSI list ID generated as part of allocate resource
2187 * Helper function to create a new entry of VSI list ID to VSI mapping
2188 * using the given VSI list ID
/* ice_create_vsi_list_map - record a VSI-list-ID -> VSI-handles mapping.
 * Allocates a map entry, stamps it with vsi_list_id, sets one bit per VSI
 * handle in its bitmap, and links it into sw->vsi_list_map_head.
 * NOTE(review): listing has elided lines (alloc-failure check, return of
 * v_map, braces); code kept byte-identical.
 */
2190 static struct ice_vsi_list_map_info *
2191 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2194 struct ice_switch_info *sw = hw->switch_info;
2195 struct ice_vsi_list_map_info *v_map;
2198 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2203 v_map->vsi_list_id = vsi_list_id;
2205 for (i = 0; i < num_vsi; i++)
2206 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2208 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2213 * ice_update_vsi_list_rule
2214 * @hw: pointer to the hardware structure
2215 * @vsi_handle_arr: array of VSI handles to form a VSI list
2216 * @num_vsi: number of VSI handles in the array
2217 * @vsi_list_id: VSI list ID generated as part of allocate resource
2218 * @remove: Boolean value to indicate if this is a remove action
2219 * @opc: switch rules population command type - pass in the command opcode
2220 * @lkup_type: lookup type of the filter
2222 * Call AQ command to add a new switch rule or update existing switch rule
2223 * using the given VSI list ID
/* ice_update_vsi_list_rule - add VSIs to / remove VSIs from a VSI list.
 * Chooses the rule element type from lkup_type (VSI-list set/clear for
 * MAC/ethertype/promisc lookups, prune-list set/clear for VLAN) and the
 * 'remove' flag, validates each handle and translates it to a HW VSI
 * number, then submits a single rule via ice_aq_sw_rules. The rule buffer
 * is freed before returning.
 * NOTE(review): listing has elided lines (initial param checks, alloc
 * check, braces, goto label, return); code kept byte-identical.
 */
2225 static enum ice_status
2226 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2227 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2228 enum ice_sw_lkup_type lkup_type)
2230 struct ice_aqc_sw_rules_elem *s_rule;
2231 enum ice_status status;
2237 return ICE_ERR_PARAM;
2239 if (lkup_type == ICE_SW_LKUP_MAC ||
2240 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2241 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2242 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2243 lkup_type == ICE_SW_LKUP_PROMISC ||
2244 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2245 lkup_type == ICE_SW_LKUP_LAST)
2246 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2247 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2248 else if (lkup_type == ICE_SW_LKUP_VLAN)
2249 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2250 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2252 return ICE_ERR_PARAM;
2254 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2255 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2257 return ICE_ERR_NO_MEMORY;
2258 for (i = 0; i < num_vsi; i++) {
2259 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2260 status = ICE_ERR_PARAM;
2263 /* AQ call requires hw_vsi_id(s) */
2264 s_rule->pdata.vsi_list.vsi[i] =
2265 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2268 s_rule->type = CPU_TO_LE16(type);
2269 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2270 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2272 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2275 ice_free(hw, s_rule);
2280 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2281 * @hw: pointer to the HW struct
2282 * @vsi_handle_arr: array of VSI handles to form a VSI list
2283 * @num_vsi: number of VSI handles in the array
2284 * @vsi_list_id: stores the ID of the VSI list to be created
2285 * @lkup_type: switch rule filter's lookup type
2287 static enum ice_status
2288 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2289 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2291 enum ice_status status;
/* Step 1: allocate a VSI list resource from firmware; on success the new
 * list ID is written through vsi_list_id.
 */
2293 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2294 ice_aqc_opc_alloc_res);
2298 /* Update the newly created VSI list to include the specified VSIs */
2299 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2300 *vsi_list_id, false,
2301 ice_aqc_opc_add_sw_rules, lkup_type);
2305 * ice_create_pkt_fwd_rule
2306 * @hw: pointer to the hardware structure
2307 * @f_entry: entry containing packet forwarding information
2309 * Create switch rule with given filter information and add an entry
2310 * to the corresponding filter management list to track this switch rule
2313 static enum ice_status
2314 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2315 struct ice_fltr_list_entry *f_entry)
2317 struct ice_fltr_mgmt_list_entry *fm_entry;
2318 struct ice_aqc_sw_rules_elem *s_rule;
2319 enum ice_sw_lkup_type l_type;
2320 struct ice_sw_recipe *recp;
2321 enum ice_status status;
/* Rule buffer size includes the dummy ethernet header required by HW */
2323 s_rule = (struct ice_aqc_sw_rules_elem *)
2324 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2326 return ICE_ERR_NO_MEMORY;
2327 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2328 ice_malloc(hw, sizeof(*fm_entry));
2330 status = ICE_ERR_NO_MEMORY;
2331 goto ice_create_pkt_fwd_rule_exit;
/* Snapshot the caller's filter info into the tracking entry */
2334 fm_entry->fltr_info = f_entry->fltr_info;
2336 /* Initialize all the fields for the management entry */
2337 fm_entry->vsi_count = 1;
2338 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2339 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2340 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2342 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2343 ice_aqc_opc_add_sw_rules);
2345 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2346 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure only the tracking entry is freed here; s_rule itself is
 * released at the common exit label below.
 */
2348 ice_free(hw, fm_entry);
2349 goto ice_create_pkt_fwd_rule_exit;
/* Record the rule ID firmware assigned, both in the caller's entry and
 * in the book keeping entry.
 */
2352 f_entry->fltr_info.fltr_rule_id =
2353 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2354 fm_entry->fltr_info.fltr_rule_id =
2355 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2357 /* The book keeping entries will get removed when base driver
2358 * calls remove filter AQ command
2360 l_type = fm_entry->fltr_info.lkup_type;
2361 recp = &hw->switch_info->recp_list[l_type];
2362 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2364 ice_create_pkt_fwd_rule_exit:
2365 ice_free(hw, s_rule);
2370 * ice_update_pkt_fwd_rule
2371 * @hw: pointer to the hardware structure
2372 * @f_info: filter information for switch rule
2374 * Call AQ command to update a previously created switch rule with a
2377 static enum ice_status
2378 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2380 struct ice_aqc_sw_rules_elem *s_rule;
2381 enum ice_status status;
2383 s_rule = (struct ice_aqc_sw_rules_elem *)
2384 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)
2386 return ICE_ERR_NO_MEMORY;
2388 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule via the ID firmware returned at add time */
2390 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2392 /* Update switch rule with new rule set to forward VSI list */
2393 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2394 ice_aqc_opc_update_sw_rules, NULL);
2396 ice_free(hw, s_rule);
2401 * ice_update_sw_rule_bridge_mode
2402 * @hw: pointer to the HW struct
2404 * Updates unicast switch filter rules based on VEB/VEPA mode
2406 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2408 struct ice_switch_info *sw = hw->switch_info;
2409 struct ice_fltr_mgmt_list_entry *fm_entry;
2410 enum ice_status status = ICE_SUCCESS;
2411 struct LIST_HEAD_TYPE *rule_head;
2412 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC-lookup recipe's rule list is walked here */
2414 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2415 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2417 ice_acquire_lock(rule_lock);
2418 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2420 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2421 u8 *addr = fi->l_data.mac.mac_addr;
2423 /* Update unicast Tx rules to reflect the selected
/* Re-program every Tx unicast forwarding rule; any AQ failure status is
 * propagated to the caller after the loop.
 */
2426 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2427 (fi->fltr_act == ICE_FWD_TO_VSI ||
2428 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2429 fi->fltr_act == ICE_FWD_TO_Q ||
2430 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2431 status = ice_update_pkt_fwd_rule(hw, fi);
2437 ice_release_lock(rule_lock);
2443 * ice_add_update_vsi_list
2444 * @hw: pointer to the hardware structure
2445 * @m_entry: pointer to current filter management list entry
2446 * @cur_fltr: filter information from the book keeping entry
2447 * @new_fltr: filter information with the new VSI to be added
2449 * Call AQ command to add or update previously created VSI list with new VSI.
2451 * Helper function to do book keeping associated with adding filter information
2452 * The algorithm to do the book keeping is described below :
2453 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2454 * if only one VSI has been added till now
2455 * Allocate a new VSI list and add two VSIs
2456 * to this list using switch rule command
2457 * Update the previously created switch rule with the
2458 * newly created VSI list ID
2459 * if a VSI list was previously created
2460 * Add the new VSI to the previously created VSI list set
2461 * using the update switch rule command
2463 static enum ice_status
2464 ice_add_update_vsi_list(struct ice_hw *hw,
2465 struct ice_fltr_mgmt_list_entry *m_entry,
2466 struct ice_fltr_info *cur_fltr,
2467 struct ice_fltr_info *new_fltr)
2469 enum ice_status status = ICE_SUCCESS;
2470 u16 vsi_list_id = 0;
/* Queue / queue-group actions cannot be folded into a VSI list, in
 * either direction — reject such combinations up front.
 */
2472 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2473 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2474 return ICE_ERR_NOT_IMPL;
2476 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2477 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2478 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2479 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2480 return ICE_ERR_NOT_IMPL;
2482 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2483 /* Only one entry existed in the mapping and it was not already
2484 * a part of a VSI list. So, create a VSI list with the old and
2487 struct ice_fltr_info tmp_fltr;
2488 u16 vsi_handle_arr[2];
2490 /* A rule already exists with the new VSI being added */
2491 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2492 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the old VSI and the new one */
2494 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2495 vsi_handle_arr[1] = new_fltr->vsi_handle;
2496 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2498 new_fltr->lkup_type);
2502 tmp_fltr = *new_fltr;
2503 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2504 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2505 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2506 /* Update the previous switch rule of "MAC forward to VSI" to
2507 * "MAC fwd to VSI list"
2509 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book keeping: the existing rule now forwards to the new VSI list */
2513 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2514 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2515 m_entry->vsi_list_info =
2516 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2519 /* If this entry was large action then the large action needs
2520 * to be updated to point to FWD to VSI list
2522 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2524 ice_add_marker_act(hw, m_entry,
2525 m_entry->sw_marker_id,
2526 m_entry->lg_act_idx);
/* Else branch: a VSI list already exists — just append the new VSI */
2528 u16 vsi_handle = new_fltr->vsi_handle;
2529 enum ice_adminq_opc opcode;
2531 if (!m_entry->vsi_list_info)
2534 /* A rule already exists with the new VSI being added */
2535 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2538 /* Update the previously created VSI list set with
2539 * the new VSI ID passed in
2541 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2542 opcode = ice_aqc_opc_update_sw_rules;
2544 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2545 vsi_list_id, false, opcode,
2546 new_fltr->lkup_type);
2547 /* update VSI list mapping info with new VSI ID */
2549 ice_set_bit(vsi_handle,
2550 m_entry->vsi_list_info->vsi_map);
/* On success the subscriber count for this filter grows by one */
2553 m_entry->vsi_count++;
2558 * ice_find_rule_entry - Search a rule entry
2559 * @hw: pointer to the hardware structure
2560 * @recp_id: lookup type for which the specified rule needs to be searched
2561 * @f_info: rule information
2563 * Helper function to search for a given rule entry
2564 * Returns pointer to entry storing the rule if found
2566 static struct ice_fltr_mgmt_list_entry *
2567 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2569 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2570 struct ice_switch_info *sw = hw->switch_info;
2571 struct LIST_HEAD_TYPE *list_head;
2573 list_head = &sw->recp_list[recp_id].filt_rules;
2574 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Only the lookup data (l_data) and the flag are compared; the filter
 * action is not part of the match key.
 */
2576 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2577 sizeof(f_info->l_data)) &&
2578 f_info->flag == list_itr->fltr_info.flag) {
2587 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2588 * @hw: pointer to the hardware structure
2589 * @recp_id: lookup type for which VSI lists needs to be searched
2590 * @vsi_handle: VSI handle to be found in VSI list
2591 * @vsi_list_id: VSI list ID found containing vsi_handle
2593 * Helper function to search a VSI list with single entry containing given VSI
2594 * handle element. This can be extended further to search VSI list with more
2595 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2597 static struct ice_vsi_list_map_info *
2598 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2601 struct ice_vsi_list_map_info *map_info = NULL;
2602 struct ice_switch_info *sw = hw->switch_info;
2603 struct LIST_HEAD_TYPE *list_head;
2605 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different list-entry type, so the walk
 * is duplicated for the two entry layouts.
 */
2606 if (sw->recp_list[recp_id].adv_rule) {
2607 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2609 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2610 ice_adv_fltr_mgmt_list_entry,
2612 if (list_itr->vsi_list_info) {
2613 map_info = list_itr->vsi_list_info;
2614 if (ice_is_bit_set(map_info->vsi_map,
2616 *vsi_list_id = map_info->vsi_list_id;
/* Legacy filter entries: only consider lists with a single VSI, per the
 * function's contract described in the header comment.
 */
2622 struct ice_fltr_mgmt_list_entry *list_itr;
2624 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2625 ice_fltr_mgmt_list_entry,
2627 if (list_itr->vsi_count == 1 &&
2628 list_itr->vsi_list_info) {
2629 map_info = list_itr->vsi_list_info;
2630 if (ice_is_bit_set(map_info->vsi_map,
2632 *vsi_list_id = map_info->vsi_list_id;
2642 * ice_add_rule_internal - add rule for a given lookup type
2643 * @hw: pointer to the hardware structure
2644 * @recp_id: lookup type (recipe ID) for which rule has to be added
2645 * @f_entry: structure containing MAC forwarding information
2647 * Adds or updates the rule lists for a given recipe
2649 static enum ice_status
2650 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2651 struct ice_fltr_list_entry *f_entry)
2653 struct ice_switch_info *sw = hw->switch_info;
2654 struct ice_fltr_info *new_fltr, *cur_fltr;
2655 struct ice_fltr_mgmt_list_entry *m_entry;
2656 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2657 enum ice_status status = ICE_SUCCESS;
2659 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2660 return ICE_ERR_PARAM;
2662 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2663 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2664 f_entry->fltr_info.fwd_id.hw_vsi_id =
2665 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2667 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2669 ice_acquire_lock(rule_lock);
2670 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the physical port; Tx rules from the VSI */
2671 if (new_fltr->flag & ICE_FLTR_RX)
2672 new_fltr->src = hw->port_info->lport;
2673 else if (new_fltr->flag & ICE_FLTR_TX)
2675 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule for this lookup data: create a fresh forwarding
 * rule; otherwise fold the new VSI into the existing rule's VSI list.
 */
2677 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2679 status = ice_create_pkt_fwd_rule(hw, f_entry);
2680 goto exit_add_rule_internal;
2683 cur_fltr = &m_entry->fltr_info;
2684 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2686 exit_add_rule_internal:
2687 ice_release_lock(rule_lock);
2692 * ice_remove_vsi_list_rule
2693 * @hw: pointer to the hardware structure
2694 * @vsi_list_id: VSI list ID generated as part of allocate resource
2695 * @lkup_type: switch rule filter lookup type
2697 * The VSI list should be emptied before this function is called to remove the
2700 static enum ice_status
2701 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2702 enum ice_sw_lkup_type lkup_type)
2704 struct ice_aqc_sw_rules_elem *s_rule;
2705 enum ice_status status;
2708 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2709 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2711 return ICE_ERR_NO_MEMORY;
/* NOTE(review): s_rule is populated here but never handed to an AQ call
 * anywhere in this function — only the resource free below is issued.
 * This looks like dead setup; confirm against the full source.
 */
2713 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2714 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2716 /* Free the vsi_list resource that we allocated. It is assumed that the
2717 * list is empty at this point.
2719 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2720 ice_aqc_opc_free_res);
2722 ice_free(hw, s_rule);
2727 * ice_rem_update_vsi_list
2728 * @hw: pointer to the hardware structure
2729 * @vsi_handle: VSI handle of the VSI to remove
2730 * @fm_list: filter management entry for which the VSI list management needs to
2733 static enum ice_status
2734 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2735 struct ice_fltr_mgmt_list_entry *fm_list)
2737 enum ice_sw_lkup_type lkup_type;
2738 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for rules that forward to a non-empty VSI list */
2741 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2742 fm_list->vsi_count == 0)
2743 return ICE_ERR_PARAM;
2745 /* A rule with the VSI being removed does not exist */
2746 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2747 return ICE_ERR_DOES_NOT_EXIST;
2749 lkup_type = fm_list->fltr_info.lkup_type;
2750 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Ask firmware to drop this VSI from the list (remove == true) */
2751 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2752 ice_aqc_opc_update_sw_rules,
/* Mirror the removal in the software tracking state */
2757 fm_list->vsi_count--;
2758 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* With one VSI left (non-VLAN), collapse the list rule back into a
 * direct FWD_TO_VSI rule for the remaining VSI.
 */
2760 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2761 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2762 struct ice_vsi_list_map_info *vsi_list_info =
2763 fm_list->vsi_list_info;
2766 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2768 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2769 return ICE_ERR_OUT_OF_RANGE;
2771 /* Make sure VSI list is empty before removing it below */
2772 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2774 ice_aqc_opc_update_sw_rules,
2779 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2780 tmp_fltr_info.fwd_id.hw_vsi_id =
2781 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2782 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2783 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2785 ice_debug(hw, ICE_DBG_SW,
2786 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2787 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2791 fm_list->fltr_info = tmp_fltr_info;
/* The VSI list itself is torn down once it is no longer needed:
 * non-VLAN lists at one remaining VSI, VLAN (prune) lists at zero.
 */
2794 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2795 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2796 struct ice_vsi_list_map_info *vsi_list_info =
2797 fm_list->vsi_list_info;
2799 /* Remove the VSI list since it is no longer used */
2800 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2802 ice_debug(hw, ICE_DBG_SW,
2803 "Failed to remove VSI list %d, error %d\n",
2804 vsi_list_id, status);
2808 LIST_DEL(&vsi_list_info->list_entry);
2809 ice_free(hw, vsi_list_info);
2810 fm_list->vsi_list_info = NULL;
2817 * ice_remove_rule_internal - Remove a filter rule of a given type
2819 * @hw: pointer to the hardware structure
2820 * @recp_id: recipe ID for which the rule needs to removed
2821 * @f_entry: rule entry containing filter information
2823 static enum ice_status
2824 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2825 struct ice_fltr_list_entry *f_entry)
2827 struct ice_switch_info *sw = hw->switch_info;
2828 struct ice_fltr_mgmt_list_entry *list_elem;
2829 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2830 enum ice_status status = ICE_SUCCESS;
2831 bool remove_rule = false;
2834 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2835 return ICE_ERR_PARAM;
2836 f_entry->fltr_info.fwd_id.hw_vsi_id =
2837 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2839 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2840 ice_acquire_lock(rule_lock);
2841 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2843 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be deleted, or only the VSI
 * list it forwards to needs shrinking.
 */
2847 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2849 } else if (!list_elem->vsi_list_info) {
2850 status = ICE_ERR_DOES_NOT_EXIST;
2852 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2853 /* a ref_cnt > 1 indicates that the vsi_list is being
2854 * shared by multiple rules. Decrement the ref_cnt and
2855 * remove this rule, but do not modify the list, as it
2856 * is in-use by other rules.
2858 list_elem->vsi_list_info->ref_cnt--;
2861 /* a ref_cnt of 1 indicates the vsi_list is only used
2862 * by one rule. However, the original removal request is only
2863 * for a single VSI. Update the vsi_list first, and only
2864 * remove the rule if there are no further VSIs in this list.
2866 vsi_handle = f_entry->fltr_info.vsi_handle;
2867 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2870 /* if VSI count goes to zero after updating the VSI list */
2871 if (list_elem->vsi_count == 0)
2876 /* Remove the lookup rule */
2877 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal requires no dummy packet header, hence the NO_HDR size */
2879 s_rule = (struct ice_aqc_sw_rules_elem *)
2880 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2882 status = ICE_ERR_NO_MEMORY;
2886 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2887 ice_aqc_opc_remove_sw_rules);
2889 status = ice_aq_sw_rules(hw, s_rule,
2890 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2891 ice_aqc_opc_remove_sw_rules, NULL);
2893 /* Remove a book keeping from the list */
2894 ice_free(hw, s_rule);
2899 LIST_DEL(&list_elem->list_entry);
2900 ice_free(hw, list_elem);
2903 ice_release_lock(rule_lock);
2908 * ice_aq_get_res_alloc - get allocated resources
2909 * @hw: pointer to the HW struct
2910 * @num_entries: pointer to u16 to store the number of resource entries returned
2911 * @buf: pointer to user-supplied buffer
2912 * @buf_size: size of buff
2913 * @cd: pointer to command details structure or NULL
2915 * The user-supplied buffer must be large enough to store the resource
2916 * information for all resource types. Each resource type is an
2917 * ice_aqc_get_res_resp_data_elem structure.
2920 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2921 u16 buf_size, struct ice_sq_cd *cd)
2923 struct ice_aqc_get_res_alloc *resp;
2924 enum ice_status status;
2925 struct ice_aq_desc desc;
2928 return ICE_ERR_BAD_PTR;
/* Caller's buffer must hold every resource type's response element */
2930 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2931 return ICE_ERR_INVAL_SIZE;
2933 resp = &desc.params.get_res;
2935 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2936 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional — only written back on success if provided */
2938 if (!status && num_entries)
2939 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2945 * ice_aq_get_res_descs - get allocated resource descriptors
2946 * @hw: pointer to the hardware structure
2947 * @num_entries: number of resource entries in buffer
2948 * @buf: Indirect buffer to hold data parameters and response
2949 * @buf_size: size of buffer for indirect commands
2950 * @res_type: resource type
2951 * @res_shared: is resource shared
2952 * @desc_id: input - first desc ID to start; output - next desc ID
2953 * @cd: pointer to command details structure or NULL
2956 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2957 struct ice_aqc_get_allocd_res_desc_resp *buf,
2958 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2959 struct ice_sq_cd *cd)
2961 struct ice_aqc_get_allocd_res_desc *cmd;
2962 struct ice_aq_desc desc;
2963 enum ice_status status;
2965 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2967 cmd = &desc.params.get_res_desc;
2970 return ICE_ERR_PARAM;
/* Buffer must be exactly num_entries response elements */
2972 if (buf_size != (num_entries * sizeof(*buf)))
2973 return ICE_ERR_PARAM;
2975 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and shared/dedicated flag into the command */
2977 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2978 ICE_AQC_RES_TYPE_M) | (res_shared ?
2979 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2980 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2982 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is in/out: firmware returns the next descriptor to fetch */
2984 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2990 * ice_add_mac - Add a MAC address based filter rule
2991 * @hw: pointer to the hardware structure
2992 * @m_list: list of MAC addresses and forwarding information
2994 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2995 * multiple unicast addresses, the function assumes that all the
2996 * addresses are unique in a given add_mac call. It doesn't
2997 * check for duplicates in this case, removing duplicates from a given
2998 * list should be taken care of in the caller of this function.
3001 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3003 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3004 struct ice_fltr_list_entry *m_list_itr;
3005 struct LIST_HEAD_TYPE *rule_head;
3006 u16 elem_sent, total_elem_left;
3007 struct ice_switch_info *sw;
3008 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3009 enum ice_status status = ICE_SUCCESS;
3010 u16 num_unicast = 0;
3014 return ICE_ERR_PARAM;
3016 sw = hw->switch_info;
3017 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; multicast (and shared unicast) entries
 * are added one-by-one here, unique unicast entries are counted for a
 * bulk AQ update below.
 */
3018 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3020 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3024 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3025 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3026 if (!ice_is_vsi_valid(hw, vsi_handle))
3027 return ICE_ERR_PARAM;
3028 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3029 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3030 /* update the src in case it is VSI num */
3031 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3032 return ICE_ERR_PARAM;
3033 m_list_itr->fltr_info.src = hw_vsi_id;
3034 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3035 IS_ZERO_ETHER_ADDR(add))
3036 return ICE_ERR_PARAM;
3037 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3038 /* Don't overwrite the unicast address */
3039 ice_acquire_lock(rule_lock);
3040 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3041 &m_list_itr->fltr_info)) {
3042 ice_release_lock(rule_lock);
3043 return ICE_ERR_ALREADY_EXISTS;
3045 ice_release_lock(rule_lock);
3047 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3048 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3049 m_list_itr->status =
3050 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3052 if (m_list_itr->status)
3053 return m_list_itr->status;
3057 ice_acquire_lock(rule_lock);
3058 /* Exit if no suitable entries were found for adding bulk switch rule */
3060 status = ICE_SUCCESS;
3061 goto ice_add_mac_exit;
3064 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3066 /* Allocate switch rule buffer for the bulk update for unicast */
3067 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3068 s_rule = (struct ice_aqc_sw_rules_elem *)
3069 ice_calloc(hw, num_unicast, s_rule_size);
3071 status = ICE_ERR_NO_MEMORY;
3072 goto ice_add_mac_exit;
/* Pass 2: fill one switch-rule element per unicast address */
3076 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3078 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3079 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3081 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3082 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3083 ice_aqc_opc_add_sw_rules);
3084 r_iter = (struct ice_aqc_sw_rules_elem *)
3085 ((u8 *)r_iter + s_rule_size);
3089 /* Call AQ bulk switch rule update for all unicast addresses */
3091 /* Call AQ switch rule in AQ_MAX chunk */
3092 for (total_elem_left = num_unicast; total_elem_left > 0;
3093 total_elem_left -= elem_sent) {
3094 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ call at the maximum buffer the queue accepts */
3096 elem_sent = min(total_elem_left,
3097 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3098 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3099 elem_sent, ice_aqc_opc_add_sw_rules,
3102 goto ice_add_mac_exit;
3103 r_iter = (struct ice_aqc_sw_rules_elem *)
3104 ((u8 *)r_iter + (elem_sent * s_rule_size));
3107 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: walk the rule buffer again (same order as pass 2) to record
 * firmware-assigned rule IDs and create tracking entries.
 */
3109 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3111 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3112 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3113 struct ice_fltr_mgmt_list_entry *fm_entry;
3115 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3116 f_info->fltr_rule_id =
3117 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3118 f_info->fltr_act = ICE_FWD_TO_VSI;
3119 /* Create an entry to track this MAC address */
3120 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3121 ice_malloc(hw, sizeof(*fm_entry));
3123 status = ICE_ERR_NO_MEMORY;
3124 goto ice_add_mac_exit;
3126 fm_entry->fltr_info = *f_info;
3127 fm_entry->vsi_count = 1;
3128 /* The book keeping entries will get removed when
3129 * base driver calls remove filter AQ command
3132 LIST_ADD(&fm_entry->list_entry, rule_head);
3133 r_iter = (struct ice_aqc_sw_rules_elem *)
3134 ((u8 *)r_iter + s_rule_size);
3139 ice_release_lock(rule_lock);
3141 ice_free(hw, s_rule);
3146 * ice_add_vlan_internal - Add one VLAN based filter rule
3147 * @hw: pointer to the hardware structure
3148 * @f_entry: filter entry containing one VLAN information
3150 static enum ice_status
3151 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3153 struct ice_switch_info *sw = hw->switch_info;
3154 struct ice_fltr_mgmt_list_entry *v_list_itr;
3155 struct ice_fltr_info *new_fltr, *cur_fltr;
3156 enum ice_sw_lkup_type lkup_type;
3157 u16 vsi_list_id = 0, vsi_handle;
3158 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3159 enum ice_status status = ICE_SUCCESS;
3161 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3162 return ICE_ERR_PARAM;
3164 f_entry->fltr_info.fwd_id.hw_vsi_id =
3165 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3166 new_fltr = &f_entry->fltr_info;
3168 /* VLAN ID should only be 12 bits */
3169 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3170 return ICE_ERR_PARAM;
3172 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3173 return ICE_ERR_PARAM;
3175 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3176 lkup_type = new_fltr->lkup_type;
3177 vsi_handle = new_fltr->vsi_handle;
3178 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3179 ice_acquire_lock(rule_lock);
3180 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* Case 1: no rule exists yet for this VLAN */
3182 struct ice_vsi_list_map_info *map_info = NULL;
3184 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3185 /* All VLAN pruning rules use a VSI list. Check if
3186 * there is already a VSI list containing VSI that we
3187 * want to add. If found, use the same vsi_list_id for
3188 * this new VLAN rule or else create a new list.
3190 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3194 status = ice_create_vsi_list_rule(hw,
3202 /* Convert the action to forwarding to a VSI list. */
3203 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3204 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3207 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-look up the rule we just added to obtain its tracking entry */
3209 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3212 status = ICE_ERR_DOES_NOT_EXIST;
3215 /* reuse VSI list for new rule and increment ref_cnt */
3217 v_list_itr->vsi_list_info = map_info;
3218 map_info->ref_cnt++;
3220 v_list_itr->vsi_list_info =
3221 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is not shared — append VSI */
3225 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3226 /* Update existing VSI list to add new VSI ID only if it used
3229 cur_fltr = &v_list_itr->fltr_info;
3230 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3233 /* If VLAN rule exists and VSI list being used by this rule is
3234 * referenced by more than 1 VLAN rule. Then create a new VSI
3235 * list appending previous VSI with new VSI and update existing
3236 * VLAN rule to point to new VSI list ID
3238 struct ice_fltr_info tmp_fltr;
3239 u16 vsi_handle_arr[2];
3242 /* Current implementation only supports reusing VSI list with
3243 * one VSI count. We should never hit below condition
3245 if (v_list_itr->vsi_count > 1 &&
3246 v_list_itr->vsi_list_info->ref_cnt > 1) {
3247 ice_debug(hw, ICE_DBG_SW,
3248 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3249 status = ICE_ERR_CFG;
3254 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3257 /* A rule already exists with the new VSI being added */
3258 if (cur_handle == vsi_handle) {
3259 status = ICE_ERR_ALREADY_EXISTS;
3263 vsi_handle_arr[0] = cur_handle;
3264 vsi_handle_arr[1] = vsi_handle;
3265 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3266 &vsi_list_id, lkup_type);
3270 tmp_fltr = v_list_itr->fltr_info;
3271 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3272 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3273 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3274 /* Update the previous switch rule to a new VSI list which
3275 * includes current VSI that is requested
3277 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3281 /* before overriding VSI list map info. decrement ref_cnt of
3284 v_list_itr->vsi_list_info->ref_cnt--;
3286 /* now update to newly created list */
3287 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3288 v_list_itr->vsi_list_info =
3289 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3291 v_list_itr->vsi_count++;
3295 ice_release_lock(rule_lock);
3300 * ice_add_vlan - Add VLAN based filter rule
3301 * @hw: pointer to the hardware structure
3302 * @v_list: list of VLAN entries and forwarding information
3305 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3307 struct ice_fltr_list_entry *v_list_itr;
3310 return ICE_ERR_PARAM;
/* Per-entry status is stored in each list item; the first failure also
 * aborts the walk and is returned to the caller.
 */
3312 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3314 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3315 return ICE_ERR_PARAM;
3316 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3317 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3318 if (v_list_itr->status)
3319 return v_list_itr->status;
3325 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3326 * @hw: pointer to the hardware structure
3327 * @mv_list: list of MAC and VLAN filters
3329 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3330 * pruning bits enabled, then it is the responsibility of the caller to make
3331 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3332 * VLAN won't be received on that VSI otherwise.
3335 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3337 struct ice_fltr_list_entry *mv_list_itr;
3339 if (!mv_list || !hw)
3340 return ICE_ERR_PARAM;
/* All MAC-VLAN filters are programmed as Tx rules via the shared
 * per-recipe add path; first failure aborts the walk.
 */
3342 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3344 enum ice_sw_lkup_type l_type =
3345 mv_list_itr->fltr_info.lkup_type;
3347 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3348 return ICE_ERR_PARAM;
3349 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3350 mv_list_itr->status =
3351 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3353 if (mv_list_itr->status)
3354 return mv_list_itr->status;
3360 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3361 * @hw: pointer to the hardware structure
3362 * @em_list: list of ether type MAC filter, MAC is optional
3364 * This function requires the caller to populate the entries in
3365 * the filter list with the necessary fields (including flags to
3366 * indicate Tx or Rx rules).
3369 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3371 struct ice_fltr_list_entry *em_list_itr;
3373 if (!em_list || !hw)
3374 return ICE_ERR_PARAM;
/* Unlike the MAC/VLAN add helpers, the Tx/Rx flag is taken from the
 * caller-populated entry rather than forced here.
 */
3376 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3378 enum ice_sw_lkup_type l_type =
3379 em_list_itr->fltr_info.lkup_type;
3381 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3382 l_type != ICE_SW_LKUP_ETHERTYPE)
3383 return ICE_ERR_PARAM;
3385 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3387 if (em_list_itr->status)
3388 return em_list_itr->status;
3394 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3395 * @hw: pointer to the hardware structure
3396 * @em_list: list of ethertype or ethertype MAC entries
3399 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3401 struct ice_fltr_list_entry *em_list_itr, *tmp;
3403 if (!em_list || !hw)
3404 return ICE_ERR_PARAM;
3406 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3408 enum ice_sw_lkup_type l_type =
3409 em_list_itr->fltr_info.lkup_type;
3411 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3412 l_type != ICE_SW_LKUP_ETHERTYPE)
3413 return ICE_ERR_PARAM;
3415 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3417 if (em_list_itr->status)
3418 return em_list_itr->status;
3424 * ice_rem_sw_rule_info
3425 * @hw: pointer to the hardware structure
3426 * @rule_head: pointer to the switch list structure that we want to delete
3429 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3431 if (!LIST_EMPTY(rule_head)) {
3432 struct ice_fltr_mgmt_list_entry *entry;
3433 struct ice_fltr_mgmt_list_entry *tmp;
3435 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3436 ice_fltr_mgmt_list_entry, list_entry) {
3437 LIST_DEL(&entry->list_entry);
3438 ice_free(hw, entry);
3444 * ice_rem_adv_rule_info
3445 * @hw: pointer to the hardware structure
3446 * @rule_head: pointer to the switch list structure that we want to delete
3449 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3451 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3452 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3454 if (LIST_EMPTY(rule_head))
3457 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3458 ice_adv_fltr_mgmt_list_entry, list_entry) {
3459 LIST_DEL(&lst_itr->list_entry);
3460 ice_free(hw, lst_itr->lkups);
3461 ice_free(hw, lst_itr);
3466 * ice_rem_all_sw_rules_info
3467 * @hw: pointer to the hardware structure
3469 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3471 struct ice_switch_info *sw = hw->switch_info;
3474 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3475 struct LIST_HEAD_TYPE *rule_head;
3477 rule_head = &sw->recp_list[i].filt_rules;
3478 if (!sw->recp_list[i].adv_rule)
3479 ice_rem_sw_rule_info(hw, rule_head);
3481 ice_rem_adv_rule_info(hw, rule_head);
3486 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3487 * @pi: pointer to the port_info structure
3488 * @vsi_handle: VSI handle to set as default
3489 * @set: true to add the above mentioned switch rule, false to remove it
3490 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3492 * add filter rule to set/unset given VSI as default VSI for the switch
3493 * (represented by swid)
3496 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3499 struct ice_aqc_sw_rules_elem *s_rule;
3500 struct ice_fltr_info f_info;
3501 struct ice_hw *hw = pi->hw;
3502 enum ice_adminq_opc opcode;
3503 enum ice_status status;
3507 if (!ice_is_vsi_valid(hw, vsi_handle))
3508 return ICE_ERR_PARAM;
3509 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3511 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3512 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3513 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3515 return ICE_ERR_NO_MEMORY;
3517 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3519 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3520 f_info.flag = direction;
3521 f_info.fltr_act = ICE_FWD_TO_VSI;
3522 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3524 if (f_info.flag & ICE_FLTR_RX) {
3525 f_info.src = pi->lport;
3526 f_info.src_id = ICE_SRC_ID_LPORT;
3528 f_info.fltr_rule_id =
3529 pi->dflt_rx_vsi_rule_id;
3530 } else if (f_info.flag & ICE_FLTR_TX) {
3531 f_info.src_id = ICE_SRC_ID_VSI;
3532 f_info.src = hw_vsi_id;
3534 f_info.fltr_rule_id =
3535 pi->dflt_tx_vsi_rule_id;
3539 opcode = ice_aqc_opc_add_sw_rules;
3541 opcode = ice_aqc_opc_remove_sw_rules;
3543 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3545 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3546 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3549 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3551 if (f_info.flag & ICE_FLTR_TX) {
3552 pi->dflt_tx_vsi_num = hw_vsi_id;
3553 pi->dflt_tx_vsi_rule_id = index;
3554 } else if (f_info.flag & ICE_FLTR_RX) {
3555 pi->dflt_rx_vsi_num = hw_vsi_id;
3556 pi->dflt_rx_vsi_rule_id = index;
3559 if (f_info.flag & ICE_FLTR_TX) {
3560 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3561 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3562 } else if (f_info.flag & ICE_FLTR_RX) {
3563 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3564 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3569 ice_free(hw, s_rule);
3574 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3575 * @hw: pointer to the hardware structure
3576 * @recp_id: lookup type for which the specified rule needs to be searched
3577 * @f_info: rule information
3579 * Helper function to search for a unicast rule entry - this is to be used
3580 * to remove unicast MAC filter that is not shared with other VSIs on the
3583 * Returns pointer to entry storing the rule if found
3585 static struct ice_fltr_mgmt_list_entry *
3586 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3587 struct ice_fltr_info *f_info)
3589 struct ice_switch_info *sw = hw->switch_info;
3590 struct ice_fltr_mgmt_list_entry *list_itr;
3591 struct LIST_HEAD_TYPE *list_head;
3593 list_head = &sw->recp_list[recp_id].filt_rules;
3594 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3596 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3597 sizeof(f_info->l_data)) &&
3598 f_info->fwd_id.hw_vsi_id ==
3599 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3600 f_info->flag == list_itr->fltr_info.flag)
3607 * ice_remove_mac - remove a MAC address based filter rule
3608 * @hw: pointer to the hardware structure
3609 * @m_list: list of MAC addresses and forwarding information
3611 * This function removes either a MAC filter rule or a specific VSI from a
3612 * VSI list for a multicast MAC address.
3614 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3615 * ice_add_mac. Caller should be aware that this call will only work if all
3616 * the entries passed into m_list were added previously. It will not attempt to
3617 * do a partial remove of entries that were found.
3620 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3622 struct ice_fltr_list_entry *list_itr, *tmp;
3623 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3626 return ICE_ERR_PARAM;
3628 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3629 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3631 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3632 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3635 if (l_type != ICE_SW_LKUP_MAC)
3636 return ICE_ERR_PARAM;
3638 vsi_handle = list_itr->fltr_info.vsi_handle;
3639 if (!ice_is_vsi_valid(hw, vsi_handle))
3640 return ICE_ERR_PARAM;
3642 list_itr->fltr_info.fwd_id.hw_vsi_id =
3643 ice_get_hw_vsi_num(hw, vsi_handle);
3644 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3645 /* Don't remove the unicast address that belongs to
3646 * another VSI on the switch, since it is not being
3649 ice_acquire_lock(rule_lock);
3650 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3651 &list_itr->fltr_info)) {
3652 ice_release_lock(rule_lock);
3653 return ICE_ERR_DOES_NOT_EXIST;
3655 ice_release_lock(rule_lock);
3657 list_itr->status = ice_remove_rule_internal(hw,
3660 if (list_itr->status)
3661 return list_itr->status;
3667 * ice_remove_vlan - Remove VLAN based filter rule
3668 * @hw: pointer to the hardware structure
3669 * @v_list: list of VLAN entries and forwarding information
3672 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3674 struct ice_fltr_list_entry *v_list_itr, *tmp;
3677 return ICE_ERR_PARAM;
3679 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3681 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3683 if (l_type != ICE_SW_LKUP_VLAN)
3684 return ICE_ERR_PARAM;
3685 v_list_itr->status = ice_remove_rule_internal(hw,
3688 if (v_list_itr->status)
3689 return v_list_itr->status;
3695 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3696 * @hw: pointer to the hardware structure
3697 * @v_list: list of MAC VLAN entries and forwarding information
3700 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3702 struct ice_fltr_list_entry *v_list_itr, *tmp;
3705 return ICE_ERR_PARAM;
3707 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3709 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3711 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3712 return ICE_ERR_PARAM;
3713 v_list_itr->status =
3714 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3716 if (v_list_itr->status)
3717 return v_list_itr->status;
3723 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3724 * @fm_entry: filter entry to inspect
3725 * @vsi_handle: VSI handle to compare with filter info
3728 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3730 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3731 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3732 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3733 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3738 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3739 * @hw: pointer to the hardware structure
3740 * @vsi_handle: VSI handle to remove filters from
3741 * @vsi_list_head: pointer to the list to add entry to
3742 * @fi: pointer to fltr_info of filter entry to copy & add
3744 * Helper function, used when creating a list of filters to remove from
3745 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3746 * original filter entry, with the exception of fltr_info.fltr_act and
3747 * fltr_info.fwd_id fields. These are set such that later logic can
3748 * extract which VSI to remove the fltr from, and pass on that information.
3750 static enum ice_status
3751 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3752 struct LIST_HEAD_TYPE *vsi_list_head,
3753 struct ice_fltr_info *fi)
3755 struct ice_fltr_list_entry *tmp;
3757 /* this memory is freed up in the caller function
3758 * once filters for this VSI are removed
3760 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3762 return ICE_ERR_NO_MEMORY;
3764 tmp->fltr_info = *fi;
3766 /* Overwrite these fields to indicate which VSI to remove filter from,
3767 * so find and remove logic can extract the information from the
3768 * list entries. Note that original entries will still have proper
3771 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3772 tmp->fltr_info.vsi_handle = vsi_handle;
3773 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3775 LIST_ADD(&tmp->list_entry, vsi_list_head);
3781 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3782 * @hw: pointer to the hardware structure
3783 * @vsi_handle: VSI handle to remove filters from
3784 * @lkup_list_head: pointer to the list that has certain lookup type filters
3785 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3787 * Locates all filters in lkup_list_head that are used by the given VSI,
3788 * and adds COPIES of those entries to vsi_list_head (intended to be used
3789 * to remove the listed filters).
3790 * Note that this means all entries in vsi_list_head must be explicitly
3791 * deallocated by the caller when done with list.
3793 static enum ice_status
3794 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3795 struct LIST_HEAD_TYPE *lkup_list_head,
3796 struct LIST_HEAD_TYPE *vsi_list_head)
3798 struct ice_fltr_mgmt_list_entry *fm_entry;
3799 enum ice_status status = ICE_SUCCESS;
3801 /* check to make sure VSI ID is valid and within boundary */
3802 if (!ice_is_vsi_valid(hw, vsi_handle))
3803 return ICE_ERR_PARAM;
3805 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3806 ice_fltr_mgmt_list_entry, list_entry) {
3807 struct ice_fltr_info *fi;
3809 fi = &fm_entry->fltr_info;
3810 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3813 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3822 * ice_determine_promisc_mask
3823 * @fi: filter info to parse
3825 * Helper function to determine which ICE_PROMISC_ mask corresponds
3826 * to given filter into.
3828 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3830 u16 vid = fi->l_data.mac_vlan.vlan_id;
3831 u8 *macaddr = fi->l_data.mac.mac_addr;
3832 bool is_tx_fltr = false;
3833 u8 promisc_mask = 0;
3835 if (fi->flag == ICE_FLTR_TX)
3838 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3839 promisc_mask |= is_tx_fltr ?
3840 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3841 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3842 promisc_mask |= is_tx_fltr ?
3843 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3844 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3845 promisc_mask |= is_tx_fltr ?
3846 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3848 promisc_mask |= is_tx_fltr ?
3849 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3851 return promisc_mask;
3855 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3856 * @hw: pointer to the hardware structure
3857 * @vsi_handle: VSI handle to retrieve info from
3858 * @promisc_mask: pointer to mask to be filled in
3859 * @vid: VLAN ID of promisc VLAN VSI
3862 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3865 struct ice_switch_info *sw = hw->switch_info;
3866 struct ice_fltr_mgmt_list_entry *itr;
3867 struct LIST_HEAD_TYPE *rule_head;
3868 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3870 if (!ice_is_vsi_valid(hw, vsi_handle))
3871 return ICE_ERR_PARAM;
3875 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3876 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3878 ice_acquire_lock(rule_lock);
3879 LIST_FOR_EACH_ENTRY(itr, rule_head,
3880 ice_fltr_mgmt_list_entry, list_entry) {
3881 /* Continue if this filter doesn't apply to this VSI or the
3882 * VSI ID is not in the VSI map for this filter
3884 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3887 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3889 ice_release_lock(rule_lock);
3895 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3896 * @hw: pointer to the hardware structure
3897 * @vsi_handle: VSI handle to retrieve info from
3898 * @promisc_mask: pointer to mask to be filled in
3899 * @vid: VLAN ID of promisc VLAN VSI
3902 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3905 struct ice_switch_info *sw = hw->switch_info;
3906 struct ice_fltr_mgmt_list_entry *itr;
3907 struct LIST_HEAD_TYPE *rule_head;
3908 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3910 if (!ice_is_vsi_valid(hw, vsi_handle))
3911 return ICE_ERR_PARAM;
3915 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3916 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3918 ice_acquire_lock(rule_lock);
3919 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3921 /* Continue if this filter doesn't apply to this VSI or the
3922 * VSI ID is not in the VSI map for this filter
3924 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3927 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3929 ice_release_lock(rule_lock);
3935 * ice_remove_promisc - Remove promisc based filter rules
3936 * @hw: pointer to the hardware structure
3937 * @recp_id: recipe ID for which the rule needs to removed
3938 * @v_list: list of promisc entries
3940 static enum ice_status
3941 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3942 struct LIST_HEAD_TYPE *v_list)
3944 struct ice_fltr_list_entry *v_list_itr, *tmp;
3946 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3948 v_list_itr->status =
3949 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3950 if (v_list_itr->status)
3951 return v_list_itr->status;
3957 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3958 * @hw: pointer to the hardware structure
3959 * @vsi_handle: VSI handle to clear mode
3960 * @promisc_mask: mask of promiscuous config bits to clear
3961 * @vid: VLAN ID to clear VLAN promiscuous
3964 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3967 struct ice_switch_info *sw = hw->switch_info;
3968 struct ice_fltr_list_entry *fm_entry, *tmp;
3969 struct LIST_HEAD_TYPE remove_list_head;
3970 struct ice_fltr_mgmt_list_entry *itr;
3971 struct LIST_HEAD_TYPE *rule_head;
3972 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3973 enum ice_status status = ICE_SUCCESS;
3976 if (!ice_is_vsi_valid(hw, vsi_handle))
3977 return ICE_ERR_PARAM;
3979 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3980 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3982 recipe_id = ICE_SW_LKUP_PROMISC;
3984 rule_head = &sw->recp_list[recipe_id].filt_rules;
3985 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3987 INIT_LIST_HEAD(&remove_list_head);
3989 ice_acquire_lock(rule_lock);
3990 LIST_FOR_EACH_ENTRY(itr, rule_head,
3991 ice_fltr_mgmt_list_entry, list_entry) {
3992 struct ice_fltr_info *fltr_info;
3993 u8 fltr_promisc_mask = 0;
3995 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3997 fltr_info = &itr->fltr_info;
3999 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4000 vid != fltr_info->l_data.mac_vlan.vlan_id)
4003 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4005 /* Skip if filter is not completely specified by given mask */
4006 if (fltr_promisc_mask & ~promisc_mask)
4009 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4013 ice_release_lock(rule_lock);
4014 goto free_fltr_list;
4017 ice_release_lock(rule_lock);
4019 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4022 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4023 ice_fltr_list_entry, list_entry) {
4024 LIST_DEL(&fm_entry->list_entry);
4025 ice_free(hw, fm_entry);
4032 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4033 * @hw: pointer to the hardware structure
4034 * @vsi_handle: VSI handle to configure
4035 * @promisc_mask: mask of promiscuous config bits
4036 * @vid: VLAN ID to set VLAN promiscuous
4039 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4041 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4042 struct ice_fltr_list_entry f_list_entry;
4043 struct ice_fltr_info new_fltr;
4044 enum ice_status status = ICE_SUCCESS;
4050 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4052 if (!ice_is_vsi_valid(hw, vsi_handle))
4053 return ICE_ERR_PARAM;
4054 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4056 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4058 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4059 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4060 new_fltr.l_data.mac_vlan.vlan_id = vid;
4061 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4063 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4064 recipe_id = ICE_SW_LKUP_PROMISC;
4067 /* Separate filters must be set for each direction/packet type
4068 * combination, so we will loop over the mask value, store the
4069 * individual type, and clear it out in the input mask as it
4072 while (promisc_mask) {
4078 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4079 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4080 pkt_type = UCAST_FLTR;
4081 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4082 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4083 pkt_type = UCAST_FLTR;
4085 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4086 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4087 pkt_type = MCAST_FLTR;
4088 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4089 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4090 pkt_type = MCAST_FLTR;
4092 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4093 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4094 pkt_type = BCAST_FLTR;
4095 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4096 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4097 pkt_type = BCAST_FLTR;
4101 /* Check for VLAN promiscuous flag */
4102 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4103 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4104 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4105 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4109 /* Set filter DA based on packet type */
4110 mac_addr = new_fltr.l_data.mac.mac_addr;
4111 if (pkt_type == BCAST_FLTR) {
4112 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4113 } else if (pkt_type == MCAST_FLTR ||
4114 pkt_type == UCAST_FLTR) {
4115 /* Use the dummy ether header DA */
4116 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4117 ICE_NONDMA_TO_NONDMA);
4118 if (pkt_type == MCAST_FLTR)
4119 mac_addr[0] |= 0x1; /* Set multicast bit */
4122 /* Need to reset this to zero for all iterations */
4125 new_fltr.flag |= ICE_FLTR_TX;
4126 new_fltr.src = hw_vsi_id;
4128 new_fltr.flag |= ICE_FLTR_RX;
4129 new_fltr.src = hw->port_info->lport;
4132 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4133 new_fltr.vsi_handle = vsi_handle;
4134 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4135 f_list_entry.fltr_info = new_fltr;
4137 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4138 if (status != ICE_SUCCESS)
4139 goto set_promisc_exit;
4147 * ice_set_vlan_vsi_promisc
4148 * @hw: pointer to the hardware structure
4149 * @vsi_handle: VSI handle to configure
4150 * @promisc_mask: mask of promiscuous config bits
4151 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4153 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4156 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4157 bool rm_vlan_promisc)
4159 struct ice_switch_info *sw = hw->switch_info;
4160 struct ice_fltr_list_entry *list_itr, *tmp;
4161 struct LIST_HEAD_TYPE vsi_list_head;
4162 struct LIST_HEAD_TYPE *vlan_head;
4163 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4164 enum ice_status status;
4167 INIT_LIST_HEAD(&vsi_list_head);
4168 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4169 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4170 ice_acquire_lock(vlan_lock);
4171 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4173 ice_release_lock(vlan_lock);
4175 goto free_fltr_list;
4177 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4179 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4180 if (rm_vlan_promisc)
4181 status = ice_clear_vsi_promisc(hw, vsi_handle,
4182 promisc_mask, vlan_id);
4184 status = ice_set_vsi_promisc(hw, vsi_handle,
4185 promisc_mask, vlan_id);
4191 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4192 ice_fltr_list_entry, list_entry) {
4193 LIST_DEL(&list_itr->list_entry);
4194 ice_free(hw, list_itr);
4200 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4201 * @hw: pointer to the hardware structure
4202 * @vsi_handle: VSI handle to remove filters from
4203 * @lkup: switch rule filter lookup type
4206 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4207 enum ice_sw_lkup_type lkup)
4209 struct ice_switch_info *sw = hw->switch_info;
4210 struct ice_fltr_list_entry *fm_entry;
4211 struct LIST_HEAD_TYPE remove_list_head;
4212 struct LIST_HEAD_TYPE *rule_head;
4213 struct ice_fltr_list_entry *tmp;
4214 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4215 enum ice_status status;
4217 INIT_LIST_HEAD(&remove_list_head);
4218 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4219 rule_head = &sw->recp_list[lkup].filt_rules;
4220 ice_acquire_lock(rule_lock);
4221 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4223 ice_release_lock(rule_lock);
4228 case ICE_SW_LKUP_MAC:
4229 ice_remove_mac(hw, &remove_list_head);
4231 case ICE_SW_LKUP_VLAN:
4232 ice_remove_vlan(hw, &remove_list_head);
4234 case ICE_SW_LKUP_PROMISC:
4235 case ICE_SW_LKUP_PROMISC_VLAN:
4236 ice_remove_promisc(hw, lkup, &remove_list_head);
4238 case ICE_SW_LKUP_MAC_VLAN:
4239 ice_remove_mac_vlan(hw, &remove_list_head);
4241 case ICE_SW_LKUP_ETHERTYPE:
4242 case ICE_SW_LKUP_ETHERTYPE_MAC:
4243 ice_remove_eth_mac(hw, &remove_list_head);
4245 case ICE_SW_LKUP_DFLT:
4246 ice_debug(hw, ICE_DBG_SW,
4247 "Remove filters for this lookup type hasn't been implemented yet\n");
4249 case ICE_SW_LKUP_LAST:
4250 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4254 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4255 ice_fltr_list_entry, list_entry) {
4256 LIST_DEL(&fm_entry->list_entry);
4257 ice_free(hw, fm_entry);
4262 * ice_remove_vsi_fltr - Remove all filters for a VSI
4263 * @hw: pointer to the hardware structure
4264 * @vsi_handle: VSI handle to remove filters from
4266 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4268 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4270 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4271 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4272 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4273 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4274 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4275 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4276 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4277 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4281 * ice_alloc_res_cntr - allocating resource counter
4282 * @hw: pointer to the hardware structure
4283 * @type: type of resource
4284 * @alloc_shared: if set it is shared else dedicated
4285 * @num_items: number of entries requested for FD resource type
4286 * @counter_id: counter index returned by AQ call
4289 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4292 struct ice_aqc_alloc_free_res_elem *buf;
4293 enum ice_status status;
4296 /* Allocate resource */
4297 buf_len = sizeof(*buf);
4298 buf = (struct ice_aqc_alloc_free_res_elem *)
4299 ice_malloc(hw, buf_len);
4301 return ICE_ERR_NO_MEMORY;
4303 buf->num_elems = CPU_TO_LE16(num_items);
4304 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4305 ICE_AQC_RES_TYPE_M) | alloc_shared);
4307 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4308 ice_aqc_opc_alloc_res, NULL);
4312 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4320 * ice_free_res_cntr - free resource counter
4321 * @hw: pointer to the hardware structure
4322 * @type: type of resource
4323 * @alloc_shared: if set it is shared else dedicated
4324 * @num_items: number of entries to be freed for FD resource type
4325 * @counter_id: counter ID resource which needs to be freed
4328 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4331 struct ice_aqc_alloc_free_res_elem *buf;
4332 enum ice_status status;
4336 buf_len = sizeof(*buf);
4337 buf = (struct ice_aqc_alloc_free_res_elem *)
4338 ice_malloc(hw, buf_len);
4340 return ICE_ERR_NO_MEMORY;
4342 buf->num_elems = CPU_TO_LE16(num_items);
4343 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4344 ICE_AQC_RES_TYPE_M) | alloc_shared);
4345 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4347 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4348 ice_aqc_opc_free_res, NULL);
4350 ice_debug(hw, ICE_DBG_SW,
4351 "counter resource could not be freed\n");
4358 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4359 * @hw: pointer to the hardware structure
4360 * @counter_id: returns counter index
4362 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4364 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4365 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4370 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4371 * @hw: pointer to the hardware structure
4372 * @counter_id: counter index to be freed
4374 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4376 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4377 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4382 * ice_alloc_res_lg_act - add large action resource
4383 * @hw: pointer to the hardware structure
4384 * @l_id: large action ID to fill it in
4385 * @num_acts: number of actions to hold with a large action entry
4387 static enum ice_status
4388 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4390 struct ice_aqc_alloc_free_res_elem *sw_buf;
4391 enum ice_status status;
4394 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4395 return ICE_ERR_PARAM;
4397 /* Allocate resource for large action */
4398 buf_len = sizeof(*sw_buf);
4399 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4400 ice_malloc(hw, buf_len);
4402 return ICE_ERR_NO_MEMORY;
4404 sw_buf->num_elems = CPU_TO_LE16(1);
4406 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4407 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4408 * If num_acts is greater than 2, then use
4409 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4410 * The num_acts cannot exceed 4. This was ensured at the
4411 * beginning of the function.
4414 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4415 else if (num_acts == 2)
4416 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4418 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4420 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4421 ice_aqc_opc_alloc_res, NULL);
4423 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4425 ice_free(hw, sw_buf);
4430 * ice_add_mac_with_sw_marker - add filter with sw marker
4431 * @hw: pointer to the hardware structure
/* ice_add_mac_with_sw_marker - add a MAC filter whose large action tags Rx
 * descriptors with a software marker.
 * NOTE(review): this listing has gaps (missing original lines, e.g. the
 * declarations of entry_exists/lg_act_id and several braces/labels) —
 * comments below describe only what the visible code shows.
 */
4432 * @f_info: filter info structure containing the MAC filter information
4433 * @sw_marker: sw marker to tag the Rx descriptor with
4436 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4439 struct ice_switch_info *sw = hw->switch_info;
4440 struct ice_fltr_mgmt_list_entry *m_entry;
4441 struct ice_fltr_list_entry fl_info;
4442 struct LIST_HEAD_TYPE l_head;
4443 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4444 enum ice_status ret;
/* Parameter validation: only forward-to-VSI MAC lookups with a valid
 * marker ID and VSI handle are accepted.
 */
4448 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4449 return ICE_ERR_PARAM;
4451 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4452 return ICE_ERR_PARAM;
4454 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4455 return ICE_ERR_PARAM;
4457 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4458 return ICE_ERR_PARAM;
/* Translate the software VSI handle to the hardware VSI number */
4459 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4461 /* Add filter if it doesn't exist so then the adding of large
4462 * action always results in update
/* Build a one-element temporary list on the stack for ice_add_mac() */
4465 INIT_LIST_HEAD(&l_head);
4466 fl_info.fltr_info = *f_info;
4467 LIST_ADD(&fl_info.list_entry, &l_head);
/* Remember whether the filter already existed so cleanup only removes
 * filters this call actually created.
 */
4469 entry_exists = false;
4470 ret = ice_add_mac(hw, &l_head);
4471 if (ret == ICE_ERR_ALREADY_EXISTS)
4472 entry_exists = true;
/* Per-recipe lock serializes access to the MAC filter rule list */
4476 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4477 ice_acquire_lock(rule_lock);
4478 /* Get the book keeping entry for the filter */
4479 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4483 /* If counter action was enabled for this rule then don't enable
4484 * sw marker large action
4486 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4487 ret = ICE_ERR_PARAM;
4491 /* if same marker was added before */
4492 if (m_entry->sw_marker_id == sw_marker) {
4493 ret = ICE_ERR_ALREADY_EXISTS;
4497 /* Allocate a hardware table entry to hold large act. Three actions
4498 * for marker based large action
4500 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4504 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4507 /* Update the switch rule to add the marker action */
4508 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4510 ice_release_lock(rule_lock);
/* Error path: drop the lock, then undo the filter add if (and only if)
 * it did not exist before this call.
 */
4515 ice_release_lock(rule_lock);
4516 /* only remove entry if it did not exist previously */
4518 ret = ice_remove_mac(hw, &l_head);
/* NOTE(review): listing has gaps (declarations of entry_exist, counter_id,
 * lg_act_id, goto labels and braces are missing lines); comments describe
 * only visible code.
 */
4524 * ice_add_mac_with_counter - add filter with counter enabled
4525 * @hw: pointer to the hardware structure
4526 * @f_info: pointer to filter info structure containing the MAC filter
4530 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4532 struct ice_switch_info *sw = hw->switch_info;
4533 struct ice_fltr_mgmt_list_entry *m_entry;
4534 struct ice_fltr_list_entry fl_info;
4535 struct LIST_HEAD_TYPE l_head;
4536 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4537 enum ice_status ret;
/* Only forward-to-VSI MAC lookup filters on a valid VSI are accepted */
4542 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4543 return ICE_ERR_PARAM;
4545 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4546 return ICE_ERR_PARAM;
4548 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4549 return ICE_ERR_PARAM;
4550 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
/* Track pre-existing filters so cleanup does not remove them */
4552 entry_exist = false;
4554 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4556 /* Add filter if it doesn't exist so then the adding of large
4557 * action always results in update
/* One-element stack list handed to ice_add_mac() */
4559 INIT_LIST_HEAD(&l_head);
4561 fl_info.fltr_info = *f_info;
4562 LIST_ADD(&fl_info.list_entry, &l_head);
4564 ret = ice_add_mac(hw, &l_head);
4565 if (ret == ICE_ERR_ALREADY_EXISTS)
/* Look up the bookkeeping entry under the rule-list lock */
4570 ice_acquire_lock(rule_lock);
4571 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4573 ret = ICE_ERR_BAD_PTR;
4577 /* Don't enable counter for a filter for which sw marker was enabled */
4578 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4579 ret = ICE_ERR_PARAM;
4583 /* If a counter was already enabled then don't need to add again */
4584 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4585 ret = ICE_ERR_ALREADY_EXISTS;
4589 /* Allocate a hardware table entry to VLAN counter */
4590 ret = ice_alloc_vlan_res_counter(hw, &counter_id)
4594 /* Allocate a hardware table entry to hold large act. Two actions for
4595 * counter based large action
4597 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4601 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4604 /* Update the switch rule to add the counter action */
4605 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4607 ice_release_lock(rule_lock);
/* Error path: release lock and undo the add if the filter was new */
4612 ice_release_lock(rule_lock);
4613 /* only remove entry if it did not exist previously */
4615 ret = ice_remove_mac(hw, &l_head);
4620 /* This is mapping table entry that maps every word within a given protocol
4621 * structure to the real byte offset as per the specification of that
4623 * for example dst address is 3 words in ethertype header and corresponding
4624 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4625 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4626 * matching entry describing its field. Every needs to be updated if new
4627 * structure is added to that union.
/* Each entry: { protocol type, array of byte offsets for each 16-bit word }.
 * Tunnel headers (VXLAN/GENEVE/VXLAN-GPE/GTP) start at offset 8 because they
 * are addressed past their UDP header in the field vector.
 * NOTE(review): the "0, 2, 3" in the comment above looks like a typo for
 * "0, 2, 4" (word offsets are even) — confirm against the original source.
 */
4629 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4630 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4631 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4632 { ICE_ETYPE_OL, { 0 } },
4633 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4634 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4635 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4636 26, 28, 30, 32, 34, 36, 38 } },
4637 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4638 26, 28, 30, 32, 34, 36, 38 } },
4639 { ICE_TCP_IL, { 0, 2 } },
4640 { ICE_UDP_OF, { 0, 2 } },
4641 { ICE_UDP_ILOS, { 0, 2 } },
4642 { ICE_SCTP_IL, { 0, 2 } },
4643 { ICE_VXLAN, { 8, 10, 12, 14 } },
4644 { ICE_GENEVE, { 8, 10, 12, 14 } },
4645 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4646 { ICE_NVGRE, { 0, 2, 4, 6 } },
4647 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4648 { ICE_PPPOE, { 0, 2, 4, 6 } },
4649 { ICE_PROTOCOL_LAST, { 0 } }
4652 /* The following table describes preferred grouping of recipes.
4653 * If a recipe that needs to be programmed is a superset or matches one of the
4654 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * extraction sequences. Note several UDP-based tunnels (VXLAN, GENEVE,
 * VXLAN-GPE, GTP) intentionally share ICE_UDP_OF_HW. Table is terminated
 * by ICE_PROTOCOL_LAST, which ice_prot_type_to_id() uses as its sentinel.
 */
4658 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4659 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4660 { ICE_MAC_IL, ICE_MAC_IL_HW },
4661 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4662 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4663 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4664 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4665 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4666 { ICE_TCP_IL, ICE_TCP_IL_HW },
4667 { ICE_UDP_OF, ICE_UDP_OF_HW },
4668 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4669 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4670 { ICE_VXLAN, ICE_UDP_OF_HW },
4671 { ICE_GENEVE, ICE_UDP_OF_HW },
4672 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4673 { ICE_NVGRE, ICE_GRE_OF_HW },
4674 { ICE_GTP, ICE_UDP_OF_HW },
4675 { ICE_PPPOE, ICE_PPPOE_HW },
4676 { ICE_PROTOCOL_LAST, 0 }
/* NOTE(review): listing has gaps (loop-variable declarations, "found" flag
 * handling and some braces are on missing lines); comments describe only
 * the visible code.
 */
4680 * ice_find_recp - find a recipe
4681 * @hw: pointer to the hardware structure
4682 * @lkup_exts: extension sequence to match
4684 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4686 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4688 bool refresh_required = true;
4689 struct ice_sw_recipe *recp;
4692 /* Walk through existing recipes to find a match */
4693 recp = hw->switch_info->recp_list;
4694 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4695 /* If recipe was not created for this ID, in SW bookkeeping,
4696 * check if FW has an entry for this recipe. If the FW has an
4697 * entry update it in our SW bookkeeping and continue with the
4700 if (!recp[i].recp_created)
4701 if (ice_get_recp_frm_fw(hw,
4702 hw->switch_info->recp_list, i,
4706 /* Skip inverse action recipes */
4707 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4708 ICE_AQ_RECIPE_ACT_INV_ACT)
4711 /* if number of words we are looking for match */
4712 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4713 struct ice_fv_word *a = lkup_exts->fv_words;
4714 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: for each requested word "p",
 * scan all of the recipe's words "q" for a prot_id/offset match.
 */
4718 for (p = 0; p < lkup_exts->n_val_words; p++) {
4719 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4721 if (a[p].off == b[q].off &&
4722 a[p].prot_id == b[q].prot_id)
4723 /* Found the "p"th word in the
4728 /* After walking through all the words in the
4729 * "i"th recipe if "p"th word was not found then
4730 * this recipe is not what we are looking for.
4731 * So break out from this loop and try the next
4734 if (q >= recp[i].lkup_exts.n_val_words) {
4739 /* If for "i"th recipe the found was never set to false
4740 * then it means we found our match
4743 return i; /* Return the recipe ID */
/* No recipe matched the requested extraction sequence */
4746 return ICE_MAX_NUM_RECIPES;
4750 * ice_prot_type_to_id - get protocol ID from protocol type
4751 * @type: protocol type
4752 * @id: pointer to variable that will receive the ID
4754 * Returns true if found, false otherwise
/* Linear scan of ice_prot_id_tbl, terminated by the ICE_PROTOCOL_LAST
 * sentinel; writes the matching hardware protocol ID through @id.
 */
4756 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4760 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4761 if (ice_prot_id_tbl[i].type == type) {
4762 *id = ice_prot_id_tbl[i].protocol_id;
/* NOTE(review): the comment header says "ice_find_valid_words" but the
 * function is named ice_fill_valid_words — likely a stale header in the
 * original; confirm before changing.
 */
4769 * ice_find_valid_words - count valid words
4770 * @rule: advanced rule with lookup information
4771 * @lkup_exts: byte offset extractions of the words that are valid
4773 * calculate valid words in a lookup rule using mask value
4776 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4777 struct ice_prot_lkup_ext *lkup_exts)
/* Bail out if the rule's protocol type has no hardware ID mapping */
4783 if (!ice_prot_type_to_id(rule->type, &prot_id))
/* Append after any words already collected from earlier rules */
4786 word = lkup_exts->n_val_words;
/* Walk the match-mask union 16 bits at a time; every non-zero mask word
 * contributes one extraction entry (offset, protocol ID, mask).
 */
4788 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4789 if (((u16 *)&rule->m_u)[j] &&
4790 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4791 /* No more space to accommodate */
4792 if (word >= ICE_MAX_CHAIN_WORDS)
4794 lkup_exts->fv_words[word].off =
4795 ice_prot_ext[rule->type].offs[j];
4796 lkup_exts->fv_words[word].prot_id =
4797 ice_prot_id_tbl[rule->type].protocol_id;
4798 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return the number of words added by this call */
4802 ret_val = word - lkup_exts->n_val_words;
4803 lkup_exts->n_val_words = word;
/* NOTE(review): listing has gaps — the initial grp==NULL check, the
 * recipe-group counter increment and the final *recp_cnt assignment are on
 * missing lines.
 */
4809 * ice_create_first_fit_recp_def - Create a recipe grouping
4810 * @hw: pointer to the hardware structure
4811 * @lkup_exts: an array of protocol header extractions
4812 * @rg_list: pointer to a list that stores new recipe groups
4813 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4815 * Using first fit algorithm, take all the words that are still not done
4816 * and start grouping them in 4-word groups. Each group makes up one
4819 static enum ice_status
4820 ice_create_first_fit_recp_def(struct ice_hw *hw,
4821 struct ice_prot_lkup_ext *lkup_exts,
4822 struct LIST_HEAD_TYPE *rg_list,
4825 struct ice_pref_recipe_group *grp = NULL;
4830 /* Walk through every word in the rule to check if it is not done. If so
4831 * then this word needs to be part of a new recipe.
4833 for (j = 0; j < lkup_exts->n_val_words; j++)
4834 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Current group is full (ICE_NUM_WORDS_RECIPE pairs) or absent:
 * allocate a fresh group entry and append it to rg_list.
 */
4836 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4837 struct ice_recp_grp_entry *entry;
4839 entry = (struct ice_recp_grp_entry *)
4840 ice_malloc(hw, sizeof(*entry));
4842 return ICE_ERR_NO_MEMORY;
4843 LIST_ADD(&entry->l_entry, rg_list);
4844 grp = &entry->r_group;
/* Copy this word's protocol/offset pair and mask into the group */
4848 grp->pairs[grp->n_val_pairs].prot_id =
4849 lkup_exts->fv_words[j].prot_id;
4850 grp->pairs[grp->n_val_pairs].off =
4851 lkup_exts->fv_words[j].off;
4852 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4860 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4861 * @hw: pointer to the hardware structure
4862 * @fv_list: field vector with the extraction sequence information
4863 * @rg_list: recipe groupings with protocol-offset pairs
4865 * Helper function to fill in the field vector indices for protocol-offset
4866 * pairs. These indexes are then ultimately programmed into a recipe.
4868 static enum ice_status
4869 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4870 struct LIST_HEAD_TYPE *rg_list)
4872 struct ice_sw_fv_list_entry *fv;
4873 struct ice_recp_grp_entry *rg;
4874 struct ice_fv_word *fv_ext;
/* Nothing to index against if no field vectors were found */
4876 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; all profiles in
 * fv_list are expected to share compatible extraction word placement.
 */
4879 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4880 fv_ext = fv->fv_ptr->ew;
4882 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4885 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4886 struct ice_fv_word *pr;
4891 pr = &rg->r_group.pairs[i];
4892 mask = rg->r_group.mask[i];
/* Search the extraction words for a matching protocol/offset */
4894 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4895 if (fv_ext[j].prot_id == pr->prot_id &&
4896 fv_ext[j].off == pr->off) {
4899 /* Store index of field vector */
4901 /* Mask is given by caller as big
4902 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask (BE -> LE) for firmware */
4905 rg->fv_mask[i] = mask << 8 | mask >> 8;
4909 /* Protocol/offset could not be found, caller gave an
4913 return ICE_ERR_PARAM;
/* NOTE(review): listing has gaps — the return-count accumulator and the
 * final return statement are on missing lines; the function appears to
 * return the number of free result indexes.
 */
4921 * ice_find_free_recp_res_idx - find free result indexes for recipe
4922 * @hw: pointer to hardware structure
4923 * @profiles: bitmap of profiles that will be associated with the new recipe
4924 * @free_idx: pointer to variable to receive the free index bitmap
4926 * The algorithm used here is:
4927 * 1. When creating a new recipe, create a set P which contains all
4928 * Profiles that will be associated with our new recipe
4930 * 2. For each Profile p in set P:
4931 * a. Add all recipes associated with Profile p into set R
4932 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4933 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4934 * i. Or just assume they all have the same possible indexes:
4936 * i.e., PossibleIndexes = 0x0000F00000000000
4938 * 3. For each Recipe r in set R:
4939 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4940 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4942 * FreeIndexes will contain the bits indicating the indexes free for use,
4943 * then the code needs to update the recipe[r].used_result_idx_bits to
4944 * indicate which indexes were selected for use by this recipe.
4947 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4948 ice_bitmap_t *free_idx)
4950 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4951 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4952 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4956 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
4957 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4958 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4959 ice_init_possible_res_bm(possible_idx);
/* Seed possible_idx from the fixed ICE_POSSIBLE_RES_IDX mask */
4961 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
4962 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
4963 ice_set_bit(bit, possible_idx);
4965 /* For each profile we are going to associate the recipe with, add the
4966 * recipes that are associated with that profile. This will give us
4967 * the set of recipes that our recipe may collide with.
4970 while (ICE_MAX_NUM_PROFILES >
4971 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4972 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4973 ICE_MAX_NUM_RECIPES);
4978 /* For each recipe that our new recipe may collide with, determine
4979 * which indexes have been used.
4981 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4982 if (ice_is_bit_set(recipes, bit))
4983 ice_or_bitmap(used_idx, used_idx,
4984 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible) */
4987 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4989 /* return number of free indexes */
4991 while (ICE_MAX_FV_WORDS >
4992 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
/* NOTE(review): this is the largest and most gap-ridden block of the
 * listing — loop counters (recps, i), goto labels, error-exit cleanup and
 * several closing braces are on missing lines. Comments below annotate
 * only the visible statements; do not infer full control flow from them.
 */
5001 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5002 * @hw: pointer to hardware structure
5003 * @rm: recipe management list entry
5004 * @match_tun: if field vector index for tunnel needs to be programmed
5005 * @profiles: bitmap of profiles that will be assocated.
5007 static enum ice_status
5008 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5009 bool match_tun, ice_bitmap_t *profiles)
5011 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5012 struct ice_aqc_recipe_data_elem *tmp;
5013 struct ice_aqc_recipe_data_elem *buf;
5014 struct ice_recp_grp_entry *entry;
5015 enum ice_status status;
5021 /* When more than one recipe are required, another recipe is needed to
5022 * chain them together. Matching a tunnel metadata ID takes up one of
5023 * the match fields in the chaining recipe reducing the number of
5024 * chained recipes by one.
5026 /* check number of free result indices */
5027 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS)
5028 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
/* A chained (multi-group) recipe needs one free result index per group */
5030 if (rm->n_grp_count > 1) {
5031 if (rm->n_grp_count > free_res_idx)
5032 return ICE_ERR_MAX_LIMIT;
/* Scratch buffer to read back all existing recipes from firmware */
5037 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5038 ICE_MAX_NUM_RECIPES,
5041 return ICE_ERR_NO_MEMORY;
/* Buffer holding the recipes this call will program, one per group */
5043 buf = (struct ice_aqc_recipe_data_elem *)
5044 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5046 status = ICE_ERR_NO_MEMORY;
5050 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5051 recipe_count = ICE_MAX_NUM_RECIPES;
5052 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5054 if (status || recipe_count == 0)
5057 /* Allocate the recipe resources, and configure them according to the
5058 * match fields from protocol headers and extracted field vectors.
5060 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5061 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5064 status = ice_alloc_recipe(hw, &entry->rid);
5068 /* Clear the result index of the located recipe, as this will be
5069 * updated, if needed, later in the recipe creation process.
5071 tmp[0].content.result_indx = 0;
5073 buf[recps] = tmp[0];
5074 buf[recps].recipe_indx = (u8)entry->rid;
5075 /* if the recipe is a non-root recipe RID should be programmed
5076 * as 0 for the rules to be applied correctly.
5078 buf[recps].content.rid = 0;
5079 ice_memset(&buf[recps].content.lkup_indx, 0,
5080 sizeof(buf[recps].content.lkup_indx),
5083 /* All recipes use look-up index 0 to match switch ID. */
5084 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5085 buf[recps].content.mask[0] =
5086 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5087 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5090 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5091 buf[recps].content.lkup_indx[i] = 0x80;
5092 buf[recps].content.mask[i] = 0;
/* Fill in the real field-vector indexes/masks for this group's pairs */
5095 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5096 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5097 buf[recps].content.mask[i + 1] =
5098 CPU_TO_LE16(entry->fv_mask[i]);
5101 if (rm->n_grp_count > 1) {
5102 /* Checks to see if there really is a valid result index
5105 if (chain_idx >= ICE_MAX_FV_WORDS) {
5106 ice_debug(hw, ICE_DBG_SW,
5107 "No chain index available\n");
5108 status = ICE_ERR_MAX_LIMIT;
/* Claim the result index for this sub-recipe and advance to the
 * next free one for the following group.
 */
5112 entry->chain_idx = chain_idx;
5113 buf[recps].content.result_indx =
5114 ICE_AQ_RECIPE_RESULT_EN |
5115 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5116 ICE_AQ_RECIPE_RESULT_DATA_M);
5117 ice_clear_bit(chain_idx, result_idx_bm);
5118 chain_idx = ice_find_first_bit(result_idx_bm,
5122 /* fill recipe dependencies */
5123 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5124 ICE_MAX_NUM_RECIPES);
5125 ice_set_bit(buf[recps].recipe_indx,
5126 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5127 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is itself the root recipe */
5131 if (rm->n_grp_count == 1) {
5132 rm->root_rid = buf[0].recipe_indx;
5133 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5134 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5135 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5136 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5137 sizeof(buf[0].recipe_bitmap),
5138 ICE_NONDMA_TO_NONDMA);
5140 status = ICE_ERR_BAD_PTR;
5143 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5144 * the recipe which is getting created if specified
5145 * by user. Usually any advanced switch filter, which results
5146 * into new extraction sequence, ended up creating a new recipe
5147 * of type ROOT and usually recipes are associated with profiles
5148 * Switch rule referreing newly created recipe, needs to have
5149 * either/or 'fwd' or 'join' priority, otherwise switch rule
5150 * evaluation will not happen correctly. In other words, if
5151 * switch rule to be evaluated on priority basis, then recipe
5152 * needs to have priority, otherwise it will be evaluated last.
5154 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: build one extra root recipe that chains the
 * result indexes of all sub-recipes together.
 */
5156 struct ice_recp_grp_entry *last_chain_entry;
5159 /* Allocate the last recipe that will chain the outcomes of the
5160 * other recipes together
5162 status = ice_alloc_recipe(hw, &rid);
5166 buf[recps].recipe_indx = (u8)rid;
5167 buf[recps].content.rid = (u8)rid;
5168 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5169 /* the new entry created should also be part of rg_list to
5170 * make sure we have complete recipe
5172 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5173 sizeof(*last_chain_entry));
5174 if (!last_chain_entry) {
5175 status = ICE_ERR_NO_MEMORY;
5178 last_chain_entry->rid = rid;
5179 ice_memset(&buf[recps].content.lkup_indx, 0,
5180 sizeof(buf[recps].content.lkup_indx),
5182 /* All recipes use look-up index 0 to match switch ID. */
5183 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5184 buf[recps].content.mask[0] =
5185 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5186 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5187 buf[recps].content.lkup_indx[i] =
5188 ICE_AQ_RECIPE_LKUP_IGNORE;
5189 buf[recps].content.mask[i] = 0;
5193 /* update r_bitmap with the recp that is used for chaining */
5194 ice_set_bit(rid, rm->r_bitmap);
5195 /* this is the recipe that chains all the other recipes so it
5196 * should not have a chaining ID to indicate the same
5198 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Point each root-recipe lookup at a sub-recipe's result index */
5199 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5201 last_chain_entry->fv_idx[i] = entry->chain_idx;
5202 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5203 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5204 ice_set_bit(entry->rid, rm->r_bitmap);
5206 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5207 if (sizeof(buf[recps].recipe_bitmap) >=
5208 sizeof(rm->r_bitmap)) {
5209 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5210 sizeof(buf[recps].recipe_bitmap),
5211 ICE_NONDMA_TO_NONDMA);
5213 status = ICE_ERR_BAD_PTR;
5216 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5218 /* To differentiate among different UDP tunnels, a meta data ID
/* match_tun: burn one lookup on the tunnel-flag metadata field */
5222 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5223 buf[recps].content.mask[i] =
5224 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5228 rm->root_rid = (u8)rid;
/* Program all assembled recipes via AQ under the change lock */
5230 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5234 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5235 ice_release_change_lock(hw);
5239 /* Every recipe that just got created add it to the recipe
5242 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5243 struct ice_switch_info *sw = hw->switch_info;
5244 bool is_root, idx_found = false;
5245 struct ice_sw_recipe *recp;
5246 u16 idx, buf_idx = 0;
5248 /* find buffer index for copying some data */
5249 for (idx = 0; idx < rm->n_grp_count; idx++)
5250 if (buf[idx].recipe_indx == entry->rid) {
5256 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping (recp_list) */
5260 recp = &sw->recp_list[entry->rid];
5261 is_root = (rm->root_rid == entry->rid);
5262 recp->is_root = is_root;
5264 recp->root_rid = entry->rid;
5265 recp->big_recp = (is_root && rm->n_grp_count > 1);
5267 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5268 entry->r_group.n_val_pairs *
5269 sizeof(struct ice_fv_word),
5270 ICE_NONDMA_TO_NONDMA);
5272 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5273 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5275 /* Copy non-result fv index values and masks to recipe. This
5276 * call will also update the result recipe bitmask.
5278 ice_collect_result_idx(&buf[buf_idx], recp);
5280 /* for non-root recipes, also copy to the root, this allows
5281 * easier matching of a complete chained recipe
5284 ice_collect_result_idx(&buf[buf_idx],
5285 &sw->recp_list[rm->root_rid]);
5287 recp->n_ext_words = entry->r_group.n_val_pairs;
5288 recp->chain_idx = entry->chain_idx;
5289 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5290 recp->n_grp_count = rm->n_grp_count;
5291 recp->tun_type = rm->tun_type;
5292 recp->recp_created = true;
5307 * ice_create_recipe_group - creates recipe group
5308 * @hw: pointer to hardware structure
5309 * @rm: recipe management list entry
5310 * @lkup_exts: lookup elements
5312 static enum ice_status
5313 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5314 struct ice_prot_lkup_ext *lkup_exts)
5316 enum ice_status status;
5319 rm->n_grp_count = 0;
5321 /* Create recipes for words that are marked not done by packing them
5324 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5325 &rm->rg_list, &recp_count);
/* Cache the extraction words and masks on the recipe management entry
 * so later stages (indexing, programming) need not re-derive them.
 */
5327 rm->n_grp_count += recp_count;
5328 rm->n_ext_words = lkup_exts->n_val_words;
5329 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5330 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5331 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5332 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5339 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5340 * @hw: pointer to hardware structure
5341 * @lkups: lookup elements or match criteria for the advanced recipe, one
5342 * structure per protocol header
5343 * @lkups_cnt: number of protocols
5344 * @bm: bitmap of field vectors to consider
5345 * @fv_list: pointer to a list that holds the returned field vectors
5347 static enum ice_status
5348 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5349 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5351 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element */
5355 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5357 return ICE_ERR_NO_MEMORY;
/* Map every SW protocol type to its HW ID; unknown types are a config
 * error and abort the lookup.
 */
5359 for (i = 0; i < lkups_cnt; i++)
5360 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5361 status = ICE_ERR_CFG;
5365 /* Find field vectors that include all specified protocol types */
5366 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is freed on both success and error paths */
5369 ice_free(hw, prot_ids);
5374 * ice_add_special_words - Add words that are not protocols, such as metadata
5375 * @rinfo: other information regarding the rule e.g. priority and action info
5376 * @lkup_exts: lookup word structure
5378 static enum ice_status
5379 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5380 struct ice_prot_lkup_ext *lkup_exts)
5382 /* If this is a tunneled packet, then add recipe index to match the
5383 * tunnel bit in the packet metadata flags.
5385 if (rinfo->tun_type != ICE_NON_TUN) {
5386 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word matching the tunnel flag */
5387 u8 word = lkup_exts->n_val_words++;
5389 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5390 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5392 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No room left for the metadata word */
5394 return ICE_ERR_MAX_LIMIT;
5401 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5402 * @hw: pointer to hardware structure
5403 * @rinfo: other information regarding the rule e.g. priority and action info
5404 * @bm: pointer to memory for returning the bitmap of field vectors
/* Translates the rule's tunnel type into a profile-type filter, then asks
 * the flex-pipe layer for the matching field-vector bitmap.
 */
5407 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5410 enum ice_prof_type type;
5412 switch (rinfo->tun_type) {
5414 type = ICE_PROF_NON_TUN;
5416 case ICE_ALL_TUNNELS:
5417 type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnels share the UDP-tunnel profile type */
5419 case ICE_SW_TUN_VXLAN_GPE:
5420 case ICE_SW_TUN_GENEVE:
5421 case ICE_SW_TUN_VXLAN:
5422 case ICE_SW_TUN_UDP:
5423 case ICE_SW_TUN_GTP:
5424 type = ICE_PROF_TUN_UDP;
5426 case ICE_SW_TUN_NVGRE:
5427 type = ICE_PROF_TUN_GRE;
5429 case ICE_SW_TUN_PPPOE:
5430 type = ICE_PROF_TUN_PPPOE;
/* Default (and tun-and-non-tun) considers every profile */
5432 case ICE_SW_TUN_AND_NON_TUN:
5434 type = ICE_PROF_ALL;
5438 ice_get_sw_fv_bitmap(hw, type, bm);
/* NOTE(review): listing has gaps — several goto labels, error-exit cleanup
 * statements and closing braces are on missing lines; comments annotate
 * only the visible statements.
 */
5442 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5443 * @hw: pointer to hardware structure
5444 * @lkups: lookup elements or match criteria for the advanced recipe, one
5445 * structure per protocol header
5446 * @lkups_cnt: number of protocols
5447 * @rinfo: other information regarding the rule e.g. priority and action info
5448 * @rid: return the recipe ID of the recipe created
5450 static enum ice_status
5451 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5452 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5454 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5455 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5456 struct ice_prot_lkup_ext *lkup_exts;
5457 struct ice_recp_grp_entry *r_entry;
5458 struct ice_sw_fv_list_entry *fvit;
5459 struct ice_recp_grp_entry *r_tmp;
5460 struct ice_sw_fv_list_entry *tmp;
5461 enum ice_status status = ICE_SUCCESS;
5462 struct ice_sw_recipe *rm;
5463 bool match_tun = false;
5467 return ICE_ERR_PARAM;
5469 lkup_exts = (struct ice_prot_lkup_ext *)
5470 ice_malloc(hw, sizeof(*lkup_exts));
5472 return ICE_ERR_NO_MEMORY;
5474 /* Determine the number of words to be matched and if it exceeds a
5475 * recipe's restrictions
5477 for (i = 0; i < lkups_cnt; i++) {
5480 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5481 status = ICE_ERR_CFG;
5482 goto err_free_lkup_exts;
/* Collect extraction words from this lookup's non-zero mask words */
5485 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5487 status = ICE_ERR_CFG;
5488 goto err_free_lkup_exts;
5492 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5494 status = ICE_ERR_NO_MEMORY;
5495 goto err_free_lkup_exts;
5498 /* Get field vectors that contain fields extracted from all the protocol
5499 * headers being programmed.
5501 INIT_LIST_HEAD(&rm->fv_list);
5502 INIT_LIST_HEAD(&rm->rg_list);
5504 /* Get bitmap of field vectors (profiles) that are compatible with the
5505 * rule request; only these will be searched in the subsequent call to
5508 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5510 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5514 /* Group match words into recipes using preferred recipe grouping
5517 status = ice_create_recipe_group(hw, rm, lkup_exts);
5521 /* There is only profile for UDP tunnels. So, it is necessary to use a
5522 * metadata ID flag to differentiate different tunnel types. A separate
5523 * recipe needs to be used for the metadata.
5525 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5526 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5527 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5530 /* set the recipe priority if specified */
5531 rm->priority = rinfo->priority ? rinfo->priority : 0;
5533 /* Find offsets from the field vector. Pick the first one for all the
5536 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5540 /* get bitmap of all profiles the recipe will be associated with */
5541 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5542 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5544 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5545 ice_set_bit((u16)fvit->profile_id, profiles);
5548 /* Create any special protocol/offset pairs, such as looking at tunnel
5549 * bits by extracting metadata
5551 status = ice_add_special_words(rinfo, lkup_exts);
5553 goto err_free_lkup_exts;
5555 /* Look for a recipe which matches our requested fv / mask list */
5556 *rid = ice_find_recp(hw, lkup_exts);
5557 if (*rid < ICE_MAX_NUM_RECIPES)
5558 /* Success if found a recipe that match the existing criteria */
5561 /* Recipe we need does not exist, add a recipe */
5562 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5566 /* Associate all the recipes created with all the profiles in the
5567 * common field vector.
5569 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5571 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge firmware's existing recipe-to-profile mapping with ours */
5574 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5575 (u8 *)r_bitmap, NULL);
5579 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5580 ICE_MAX_NUM_RECIPES);
5581 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5585 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5588 ice_release_change_lock(hw);
5593 /* Update profile to recipe bitmap array */
5594 ice_memcpy(profile_to_recipe[fvit->profile_id], rm->r_bitmap,
5595 sizeof(rm->r_bitmap), ICE_NONDMA_TO_NONDMA);
5597 /* Update recipe to profile bitmap array */
5598 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5599 if (ice_is_bit_set(rm->r_bitmap, j))
5600 ice_set_bit((u16)fvit->profile_id,
5601 recipe_to_profile[j]);
/* Report the root recipe ID and cache the extraction words on it */
5604 *rid = rm->root_rid;
5605 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5606 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: free recipe-group entries and field-vector list entries */
5608 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5609 ice_recp_grp_entry, l_entry) {
5610 LIST_DEL(&r_entry->l_entry);
5611 ice_free(hw, r_entry);
5614 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5616 LIST_DEL(&fvit->list_entry);
5621 ice_free(hw, rm->root_buf);
5626 ice_free(hw, lkup_exts);
/* Selects one of the static dummy packet templates (and its offsets table)
 * that best matches the tunnel type and the L3/L4 protocols present in the
 * lookup elements. NOTE(review): several "return;" lines of the early-exit
 * branches are missing from this listing.
 */
5632 * ice_find_dummy_packet - find dummy packet by tunnel type
5634 * @lkups: lookup elements or match criteria for the advanced recipe, one
5635 * structure per protocol header
5636 * @lkups_cnt: number of protocols
5637 * @tun_type: tunnel type from the match criteria
5638 * @pkt: dummy packet to fill according to filter match criteria
5639 * @pkt_len: packet length of dummy packet
5640 * @offsets: pointer to receive the pointer to the offsets for the packet
5643 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5644 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5646 const struct ice_dummy_pkt_offsets **offsets)
5648 bool tcp = false, udp = false, ipv6 = false;
/* GTP and PPPoE have dedicated templates regardless of inner protocols */
5651 if (tun_type == ICE_SW_TUN_GTP) {
5652 *pkt = dummy_udp_gtp_packet;
5653 *pkt_len = sizeof(dummy_udp_gtp_packet);
5654 *offsets = dummy_udp_gtp_packet_offsets;
5657 if (tun_type == ICE_SW_TUN_PPPOE) {
5658 *pkt = dummy_pppoe_packet;
5659 *pkt_len = sizeof(dummy_pppoe_packet);
5660 *offsets = dummy_pppoe_packet_offsets;
/* Scan the lookups to learn which inner protocols the rule matches */
5663 for (i = 0; i < lkups_cnt; i++) {
5664 if (lkups[i].type == ICE_UDP_ILOS)
5666 else if (lkups[i].type == ICE_TCP_IL)
5668 else if (lkups[i].type == ICE_IPV6_OFOS)
5672 if (tun_type == ICE_ALL_TUNNELS) {
5673 *pkt = dummy_gre_udp_packet;
5674 *pkt_len = sizeof(dummy_gre_udp_packet);
5675 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: choose TCP or UDP inner template based on the scan above */
5679 if (tun_type == ICE_SW_TUN_NVGRE) {
5681 *pkt = dummy_gre_tcp_packet;
5682 *pkt_len = sizeof(dummy_gre_tcp_packet);
5683 *offsets = dummy_gre_tcp_packet_offsets;
5687 *pkt = dummy_gre_udp_packet;
5688 *pkt_len = sizeof(dummy_gre_udp_packet);
5689 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-encapsulated tunnels: TCP-inner or UDP-inner template */
5693 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5694 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5696 *pkt = dummy_udp_tun_tcp_packet;
5697 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5698 *offsets = dummy_udp_tun_tcp_packet_offsets;
5702 *pkt = dummy_udp_tun_udp_packet;
5703 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5704 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled: pick among UDP/UDPv6/TCPv6 templates, defaulting to TCP */
5709 *pkt = dummy_udp_packet;
5710 *pkt_len = sizeof(dummy_udp_packet);
5711 *offsets = dummy_udp_packet_offsets;
5713 } else if (udp && ipv6) {
5714 *pkt = dummy_udp_ipv6_packet;
5715 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5716 *offsets = dummy_udp_ipv6_packet_offsets;
5718 } else if ((tcp && ipv6) || ipv6) {
5719 *pkt = dummy_tcp_ipv6_packet;
5720 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5721 *offsets = dummy_tcp_ipv6_packet_offsets;
5725 *pkt = dummy_tcp_packet;
5726 *pkt_len = sizeof(dummy_tcp_packet);
5727 *offsets = dummy_tcp_packet_offsets;
5731 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5733 * @lkups: lookup elements or match criteria for the advanced recipe, one
5734 * structure per protocol header
5735 * @lkups_cnt: number of protocols
5736 * @s_rule: stores rule information from the match criteria
5737 * @dummy_pkt: dummy packet to fill according to filter match criteria
5738 * @pkt_len: packet length of dummy packet
5739 * @offsets: offset info for the dummy packet
/* NOTE(review): sampled extraction — several interior lines (case labels,
 * braces, final return) are missing; comments reflect only visible code.
 *
 * Copies the selected dummy packet into the switch rule buffer, then for each
 * lookup element overwrites only the masked 16-bit words of the corresponding
 * protocol header with the caller's header values.
 * Returns ICE_ERR_PARAM on an unknown protocol or a protocol absent from the
 * offsets table; on success the rule's hdr_len is set to pkt_len.
 */
5741 static enum ice_status
5742 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5743 struct ice_aqc_sw_rules_elem *s_rule,
5744 const u8 *dummy_pkt, u16 pkt_len,
5745 const struct ice_dummy_pkt_offsets *offsets)
5750 /* Start with a packet with a pre-defined/dummy content. Then, fill
5751 * in the header values to be looked up or matched.
5753 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5755 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5757 for (i = 0; i < lkups_cnt; i++) {
5758 enum ice_protocol_type type;
5759 u16 offset = 0, len = 0, j;
5762 /* find the start of this layer; it should be found since this
5763 * was already checked when search for the dummy packet
5765 type = lkups[i].type;
5766 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5767 if (type == offsets[j].type) {
5768 offset = offsets[j].offset;
5773 /* this should never happen in a correct calling sequence */
5775 return ICE_ERR_PARAM;
/* Per-protocol header length; the case labels for each protocol type are on
 * lines missing from this view, but each arm sets len from the matching
 * header struct size.
 */
5777 switch (lkups[i].type) {
5780 len = sizeof(struct ice_ether_hdr);
5783 len = sizeof(struct ice_ethtype_hdr);
5787 len = sizeof(struct ice_ipv4_hdr);
5791 len = sizeof(struct ice_ipv6_hdr);
5796 len = sizeof(struct ice_l4_hdr);
5799 len = sizeof(struct ice_sctp_hdr);
5802 len = sizeof(struct ice_nvgre);
5807 len = sizeof(struct ice_udp_tnl_hdr);
5811 len = sizeof(struct ice_udp_gtp_hdr);
5814 return ICE_ERR_PARAM;
5817 /* the length should be a word multiple */
5818 if (len % ICE_BYTES_PER_WORD)
5821 /* We have the offset to the header start, the length, the
5822 * caller's header values and mask. Use this information to
5823 * copy the data into the dummy packet appropriately based on
5824 * the mask. Note that we need to only write the bits as
5825 * indicated by the mask to make sure we don't improperly write
5826 * over any significant packet data.
/* Masked merge, one u16 word at a time: keep the template bits where the
 * mask is 0, take the caller's header bits where the mask is 1.
 */
5828 for (j = 0; j < len / sizeof(u16); j++)
5829 if (((u16 *)&lkups[i].m_u)[j])
5830 ((u16 *)(pkt + offset))[j] =
5831 (((u16 *)(pkt + offset))[j] &
5832 ~((u16 *)&lkups[i].m_u)[j]) |
5833 (((u16 *)&lkups[i].h_u)[j] &
5834 ((u16 *)&lkups[i].m_u)[j]);
5837 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5843 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5844 * @hw: pointer to the hardware structure
5845 * @tun_type: tunnel type
5846 * @pkt: dummy packet to fill in
5847 * @offsets: offset info for the dummy packet
/* NOTE(review): sampled extraction — the switch head, some braces and the
 * return paths are on missing lines; comments reflect only visible code.
 *
 * For UDP-based tunnel types, looks up the currently-open VXLAN or GENEVE
 * tunnel port and patches it into the outer UDP destination port of the
 * dummy packet. Other tunnel types need no patching.
 */
5849 static enum ice_status
5850 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5851 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5856 case ICE_SW_TUN_AND_NON_TUN:
5857 case ICE_SW_TUN_VXLAN_GPE:
5858 case ICE_SW_TUN_VXLAN:
5859 case ICE_SW_TUN_UDP:
5860 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5864 case ICE_SW_TUN_GENEVE:
5865 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5870 /* Nothing needs to be done for this tunnel type */
5874 /* Find the outer UDP protocol header and insert the port number */
5875 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5876 if (offsets[i].type == ICE_UDP_OF) {
5877 struct ice_l4_hdr *hdr;
5880 offset = offsets[i].offset;
5881 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Byte-swap into network (big-endian) order — assumes open_port is a u16;
 * TODO confirm against the declaration (outside this view).
 */
5882 hdr->dst_port = open_port << 8 | open_port >> 8;
5892 * ice_find_adv_rule_entry - Search a rule entry
5893 * @hw: pointer to the hardware structure
5894 * @lkups: lookup elements or match criteria for the advanced recipe, one
5895 * structure per protocol header
5896 * @lkups_cnt: number of protocols
5897 * @recp_id: recipe ID for which we are finding the rule
5898 * @rinfo: other information regarding the rule e.g. priority and action info
5900 * Helper function to search for a given advance rule entry
5901 * Returns pointer to entry storing the rule if found
/* NOTE(review): sampled extraction — the memcmp size argument, loop braces
 * and the final return are on missing lines.
 *
 * Walks the filter-rule list of the given recipe and returns the entry whose
 * lookup array (element-by-element memcmp), switch-action flag and tunnel
 * type all match the caller's criteria; presumably returns NULL when no
 * entry matches (return paths not visible here).
 */
5903 static struct ice_adv_fltr_mgmt_list_entry *
5904 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5905 u16 lkups_cnt, u8 recp_id,
5906 struct ice_adv_rule_info *rinfo)
5908 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5909 struct ice_switch_info *sw = hw->switch_info;
5912 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5913 ice_adv_fltr_mgmt_list_entry, list_entry) {
5914 bool lkups_matched = true;
/* Fast reject on count mismatch before the per-element compare. */
5916 if (lkups_cnt != list_itr->lkups_cnt)
5918 for (i = 0; i < list_itr->lkups_cnt; i++)
5919 if (memcmp(&list_itr->lkups[i], &lkups[i],
5921 lkups_matched = false;
5924 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5925 rinfo->tun_type == list_itr->rule_info.tun_type &&
5933 * ice_adv_add_update_vsi_list
5934 * @hw: pointer to the hardware structure
5935 * @m_entry: pointer to current adv filter management list entry
5936 * @cur_fltr: filter information from the book keeping entry
5937 * @new_fltr: filter information with the new VSI to be added
5939 * Call AQ command to add or update previously created VSI list with new VSI.
5941 * Helper function to do book keeping associated with adding filter information
5942 * The algorithm to do the booking keeping is described below :
5943 * When a VSI needs to subscribe to a given advanced filter
5944 * if only one VSI has been added till now
5945 * Allocate a new VSI list and add two VSIs
5946 * to this list using switch rule command
5947 * Update the previously created switch rule with the
5948 * newly created VSI list ID
5949 * if a VSI list was previously created
5950 * Add the new VSI to the previously created VSI list set
5951 * using the update switch rule command
/* NOTE(review): sampled extraction — error-check lines after the helper
 * calls and some braces are missing; comments reflect only visible code.
 *
 * Subscribes an additional VSI to an existing advanced filter:
 * - rejects queue/queue-group destinations and duplicate-drop combinations;
 * - first duplication: builds a two-entry VSI list from the old and new VSI,
 *   rewrites the existing rule to forward to that list, and records the
 *   list mapping;
 * - later duplications: appends the new VSI to the already-created list and
 *   updates the mapping bitmap.
 * On success the entry's vsi_count is incremented.
 */
5953 static enum ice_status
5954 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5955 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5956 struct ice_adv_rule_info *cur_fltr,
5957 struct ice_adv_rule_info *new_fltr)
5959 enum ice_status status;
5960 u16 vsi_list_id = 0;
5962 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5963 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5964 return ICE_ERR_NOT_IMPL;
5966 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5967 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5968 return ICE_ERR_ALREADY_EXISTS;
5970 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5971 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5972 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5973 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5974 return ICE_ERR_NOT_IMPL;
5976 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5977 /* Only one entry existed in the mapping and it was not already
5978 * a part of a VSI list. So, create a VSI list with the old and
5981 struct ice_fltr_info tmp_fltr;
5982 u16 vsi_handle_arr[2];
5984 /* A rule already exists with the new VSI being added */
5985 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5986 new_fltr->sw_act.fwd_id.hw_vsi_id)
5987 return ICE_ERR_ALREADY_EXISTS;
5989 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5990 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5991 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5997 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5998 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5999 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6000 /* Update the previous switch rule of "forward to VSI" to
6003 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the current filter now forwards to the new VSI list, and the
 * list map records both member handles.
 */
6007 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6008 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6009 m_entry->vsi_list_info =
6010 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6013 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6015 if (!m_entry->vsi_list_info)
6018 /* A rule already exists with the new VSI being added */
6019 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6022 /* Update the previously created VSI list set with
6023 * the new VSI ID passed in
6025 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6027 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6029 ice_aqc_opc_update_sw_rules,
6031 /* update VSI list mapping info with new VSI ID */
6033 ice_set_bit(vsi_handle,
6034 m_entry->vsi_list_info->vsi_map);
6037 m_entry->vsi_count++;
6042 * ice_add_adv_rule - helper function to create an advanced switch rule
6043 * @hw: pointer to the hardware structure
6044 * @lkups: information on the words that needs to be looked up. All words
6045 * together makes one recipe
6046 * @lkups_cnt: num of entries in the lkups array
6047 * @rinfo: other information related to the rule that needs to be programmed
6048 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6049 * ignored is case of error.
6051 * This function can program only 1 rule at a time. The lkups is used to
6052 * describe the all the words that forms the "lookup" portion of the recipe.
6053 * These words can span multiple protocols. Callers to this function need to
6054 * pass in a list of protocol headers with lookup information along and mask
6055 * that determines which words are valid from the given protocol header.
6056 * rinfo describes other information related to this rule such as forwarding
6057 * IDs, priority of this rule, etc.
/* NOTE(review): sampled extraction — the return-type line, several error
 * checks, case labels and gotos fall on missing lines; comments describe
 * only what the visible code establishes.
 *
 * Programs one advanced switch rule. Flow as visible here:
 *  1. validate lookups (non-zero masked word count, <= ICE_MAX_CHAIN_WORDS);
 *  2. locate a dummy packet template for the tunnel type;
 *  3. validate/derive the forwarding action and source VSI;
 *  4. create or reuse a recipe (ice_add_adv_recipe), and if an identical
 *     rule already exists just extend its VSI list;
 *  5. otherwise build the AQ rule buffer (action bits, LOOKUP_TX/RX source,
 *     dummy packet fill, tunnel-port patch), send it to FW, and record the
 *     rule in the recipe's book-keeping list;
 *  6. on any failure, free the partially built adv_fltr and s_rule buffers.
 */
6060 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6061 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6062 struct ice_rule_query_data *added_entry)
6064 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6065 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6066 const struct ice_dummy_pkt_offsets *pkt_offsets;
6067 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6068 struct LIST_HEAD_TYPE *rule_head;
6069 struct ice_switch_info *sw;
6070 enum ice_status status;
6071 const u8 *pkt = NULL;
6077 return ICE_ERR_PARAM;
6079 /* get # of words we need to match */
/* Count the 16-bit words with a non-zero mask across all lookups; a rule
 * with no masked words, or too many to chain, is rejected.
 */
6081 for (i = 0; i < lkups_cnt; i++) {
6084 ptr = (u16 *)&lkups[i].m_u;
6085 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6089 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6090 return ICE_ERR_PARAM;
6092 /* make sure that we can locate a dummy packet */
6093 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6096 status = ICE_ERR_PARAM;
6097 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported for advanced rules. */
6100 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6101 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6102 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6103 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6106 vsi_handle = rinfo->sw_act.vsi_handle;
6107 if (!ice_is_vsi_valid(hw, vsi_handle))
6108 return ICE_ERR_PARAM;
6110 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6111 rinfo->sw_act.fwd_id.hw_vsi_id =
6112 ice_get_hw_vsi_num(hw, vsi_handle);
6113 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6114 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6116 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6119 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6121 /* we have to add VSI to VSI_LIST and increment vsi_count.
6122 * Also Update VSI list so that we can change forwarding rule
6123 * if the rule already exists, we will check if it exists with
6124 * same vsi_id, if not then add it to the VSI list if it already
6125 * exists if not then create a VSI list and add the existing VSI
6126 * ID and the new VSI ID to the list
6127 * We will add that VSI to the list
6129 status = ice_adv_add_update_vsi_list(hw, m_entry,
6130 &m_entry->rule_info,
6133 added_entry->rid = rid;
6134 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6135 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule matched: build a fresh AQ switch rule buffer sized for
 * the header-less rule struct plus the dummy packet.
 */
6139 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6140 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6142 return ICE_ERR_NO_MEMORY;
6143 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6144 switch (rinfo->sw_act.fltr_act) {
6145 case ICE_FWD_TO_VSI:
6146 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6147 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6148 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6151 act |= ICE_SINGLE_ACT_TO_Q;
6152 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6153 ICE_SINGLE_ACT_Q_INDEX_M;
6155 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size in the Q region
 * field; a zero/invalid size maps to region 0.
 */
6156 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6157 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6158 act |= ICE_SINGLE_ACT_TO_Q;
6159 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6160 ICE_SINGLE_ACT_Q_INDEX_M;
6161 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6162 ICE_SINGLE_ACT_Q_REGION_M;
6164 case ICE_DROP_PACKET:
6165 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6166 ICE_SINGLE_ACT_VALID_BIT;
6169 status = ICE_ERR_CFG;
6170 goto err_ice_add_adv_rule;
6173 /* set the rule LOOKUP type based on caller specified 'RX'
6174 * instead of hardcoding it to be either LOOKUP_TX/RX
6176 * for 'RX' set the source to be the port number
6177 * for 'TX' set the source to be the source HW VSI number (determined
6181 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6182 s_rule->pdata.lkup_tx_rx.src =
6183 CPU_TO_LE16(hw->port_info->lport);
6185 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6186 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6189 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6190 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6192 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
6195 if (rinfo->tun_type != ICE_NON_TUN) {
6196 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6197 s_rule->pdata.lkup_tx_rx.hdr,
6200 goto err_ice_add_adv_rule;
6203 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6204 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6207 goto err_ice_add_adv_rule;
6208 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6209 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry))
6211 status = ICE_ERR_NO_MEMORY;
6212 goto err_ice_add_adv_rule;
/* Keep a private copy of the lookups so the caller's array may be freed. */
6215 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6216 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6217 ICE_NONDMA_TO_NONDMA);
6218 if (!adv_fltr->lkups) {
6219 status = ICE_ERR_NO_MEMORY;
6220 goto err_ice_add_adv_rule;
6223 adv_fltr->lkups_cnt = lkups_cnt;
6224 adv_fltr->rule_info = *rinfo;
6225 adv_fltr->rule_info.fltr_rule_id =
6226 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6227 sw = hw->switch_info;
6228 sw->recp_list[rid].adv_rule = true;
6229 rule_head = &sw->recp_list[rid].filt_rules;
6231 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6232 struct ice_fltr_info tmp_fltr;
6234 tmp_fltr.fltr_rule_id =
6235 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6236 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6237 tmp_fltr.fwd_id.hw_vsi_id =
6238 ice_get_hw_vsi_num(hw, vsi_handle);
6239 tmp_fltr.vsi_handle = vsi_handle;
6240 /* Update the previous switch rule of "forward to VSI" to
6243 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6245 goto err_ice_add_adv_rule;
6246 adv_fltr->vsi_count = 1;
6249 /* Add rule entry to book keeping list */
6250 LIST_ADD(&adv_fltr->list_entry, rule_head);
6252 added_entry->rid = rid;
6253 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6254 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6256 err_ice_add_adv_rule:
/* Error unwind: the list entry and its lookup copy are only freed when we
 * failed after allocating them; s_rule is freed unconditionally below.
 */
6257 if (status && adv_fltr) {
6258 ice_free(hw, adv_fltr->lkups);
6259 ice_free(hw, adv_fltr);
6262 ice_free(hw, s_rule);
6268 * ice_adv_rem_update_vsi_list
6269 * @hw: pointer to the hardware structure
6270 * @vsi_handle: VSI handle of the VSI to remove
6271 * @fm_list: filter management entry for which the VSI list management needs to
/* NOTE(review): sampled extraction — some braces, gotos and status checks
 * are on missing lines; comments reflect only visible code.
 *
 * Detaches a VSI from an advanced rule that forwards to a VSI list.
 * When the removal would leave exactly one member, the rule is converted
 * back to plain "forward to VSI" for the remaining member, the now-unused
 * VSI list rule is deleted, and the list map is freed.
 */
6274 static enum ice_status
6275 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6276 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6278 struct ice_vsi_list_map_info *vsi_list_info;
6279 enum ice_sw_lkup_type lkup_type;
6280 enum ice_status status;
6283 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6284 fm_list->vsi_count == 0)
6285 return ICE_ERR_PARAM;
6287 /* A rule with the VSI being removed does not exist */
6288 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6289 return ICE_ERR_DOES_NOT_EXIST;
6291 lkup_type = ICE_SW_LKUP_LAST;
6292 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (the "true" argument selects
 * removal per the update-VSI-list helper's contract).
 */
6293 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6294 ice_aqc_opc_update_sw_rules,
6299 fm_list->vsi_count--;
6300 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6301 vsi_list_info = fm_list->vsi_list_info;
6302 if (fm_list->vsi_count == 1) {
6303 struct ice_fltr_info tmp_fltr;
/* Only one VSI left: find it, convert the rule back to FWD_TO_VSI, then
 * dismantle the VSI list.
 */
6306 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6308 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6309 return ICE_ERR_OUT_OF_RANGE;
6311 /* Make sure VSI list is empty before removing it below */
6312 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6314 ice_aqc_opc_update_sw_rules,
6318 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6319 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6320 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6321 tmp_fltr.fwd_id.hw_vsi_id =
6322 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6323 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6324 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6326 /* Update the previous switch rule of "MAC forward to VSI" to
6327 * "MAC fwd to VSI list"
6329 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6331 ice_debug(hw, ICE_DBG_SW,
6332 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6333 tmp_fltr.fwd_id.hw_vsi_id, status);
6337 /* Remove the VSI list since it is no longer used */
6338 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6340 ice_debug(hw, ICE_DBG_SW,
6341 "Failed to remove VSI list %d, error %d\n",
6342 vsi_list_id, status);
6346 LIST_DEL(&vsi_list_info->list_entry);
6347 ice_free(hw, vsi_list_info);
6348 fm_list->vsi_list_info = NULL;
6355 * ice_rem_adv_rule - removes existing advanced switch rule
6356 * @hw: pointer to the hardware structure
6357 * @lkups: information on the words that needs to be looked up. All words
6358 * together makes one recipe
6359 * @lkups_cnt: num of entries in the lkups array
6360 * @rinfo: Its the pointer to the rule information for the rule
6362 * This function can be used to remove 1 rule at a time. The lkups is
6363 * used to describe all the words that forms the "lookup" portion of the
6364 * rule. These words can span multiple protocols. Callers to this function
6365 * need to pass in a list of protocol headers with lookup information along
6366 * and mask that determines which words are valid from the given protocol
6367 * header. rinfo describes other information related to this rule such as
6368 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): sampled extraction — the return-type line, several status
 * checks and the remove_rule decision points are on missing lines.
 *
 * Removes one advanced rule: rebuilds the lookup-extraction words from the
 * caller's criteria, finds the matching recipe and rule entry, detaches the
 * VSI (via the VSI-list helper when shared), and when the rule has no
 * remaining users sends a remove-switch-rule AQ command and frees the
 * book-keeping entry under the recipe's filter lock.
 */
6371 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6372 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6374 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6375 struct ice_prot_lkup_ext lkup_exts;
6376 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6377 enum ice_status status = ICE_SUCCESS;
6378 bool remove_rule = false;
6379 u16 i, rid, vsi_handle;
6381 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6382 for (i = 0; i < lkups_cnt; i++) {
6385 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6388 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6393 /* Create any special protocol/offset pairs, such as looking at tunnel
6394 * bits by extracting metadata
6396 status = ice_add_special_words(rinfo, &lkup_exts);
6400 rid = ice_find_recp(hw, &lkup_exts);
6401 /* If did not find a recipe that match the existing criteria */
6402 if (rid == ICE_MAX_NUM_RECIPES)
6403 return ICE_ERR_PARAM;
6405 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6406 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6407 /* the rule is already removed */
6410 ice_acquire_lock(rule_lock);
/* Decide between full rule removal and VSI-list shrink: rules not using a
 * VSI list are removed outright; shared rules first drop this VSI.
 */
6411 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6413 } else if (list_elem->vsi_count > 1) {
6414 list_elem->vsi_list_info->ref_cnt--;
6415 remove_rule = false;
6416 vsi_handle = rinfo->sw_act.vsi_handle;
6417 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6419 vsi_handle = rinfo->sw_act.vsi_handle;
6420 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6422 ice_release_lock(rule_lock);
6425 if (list_elem->vsi_count == 0)
6428 ice_release_lock(rule_lock);
6430 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal path: a header-less rule buffer referencing the FW rule index is
 * enough for the remove opcode.
 */
6433 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6435 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6438 return ICE_ERR_NO_MEMORY;
6439 s_rule->pdata.lkup_tx_rx.act = 0;
6440 s_rule->pdata.lkup_tx_rx.index =
6441 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6442 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6443 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6445 ice_aqc_opc_remove_sw_rules, NULL);
6446 if (status == ICE_SUCCESS) {
6447 ice_acquire_lock(rule_lock);
6448 LIST_DEL(&list_elem->list_entry);
6449 ice_free(hw, list_elem->lkups);
6450 ice_free(hw, list_elem);
6451 ice_release_lock(rule_lock);
6453 ice_free(hw, s_rule);
6459 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6460 * @hw: pointer to the hardware structure
6461 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6463 * This function is used to remove 1 rule at a time. The removal is based on
6464 * the remove_entry parameter. This function will remove rule for a given
6465 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* NOTE(review): sampled extraction — the return-type line and the list-entry
 * macro argument line are missing from this view.
 *
 * Removes one advanced rule identified by {rid, rule_id, vsi_handle}: scans
 * the recipe's filter list for the matching FW rule ID, substitutes the
 * caller's VSI handle into a copy of the stored rule info, and delegates
 * the actual removal to ice_rem_adv_rule(). Returns ICE_ERR_PARAM when the
 * recipe was never created or no rule matches.
 */
6468 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6469 struct ice_rule_query_data *remove_entry)
6471 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6472 struct LIST_HEAD_TYPE *list_head;
6473 struct ice_adv_rule_info rinfo;
6474 struct ice_switch_info *sw;
6476 sw = hw->switch_info;
6477 if (!sw->recp_list[remove_entry->rid].recp_created)
6478 return ICE_ERR_PARAM;
6479 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6480 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6482 if (list_itr->rule_info.fltr_rule_id ==
6483 remove_entry->rule_id) {
/* Copy the stored rule info but target the caller's VSI handle. */
6484 rinfo = list_itr->rule_info;
6485 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6486 return ice_rem_adv_rule(hw, list_itr->lkups,
6487 list_itr->lkups_cnt, &rinfo);
6490 return ICE_ERR_PARAM;
6494 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6496 * @hw: pointer to the hardware structure
6497 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6499 * This function is used to remove all the rules for a given VSI and as soon
6500 * as removing a rule fails, it will return immediately with the error code,
6501 * else it will return ICE_SUCCESS
/* NOTE(review): sampled extraction — the return-type line, continue
 * statements and the final return are on missing lines.
 *
 * Removes all advanced rules that reference the given VSI: for every created
 * recipe flagged as holding advanced rules, walks its filter list, checks
 * (via the VSI-list map lookup) whether this VSI participates, and removes
 * each matching rule; visible code suggests it stops on the first failure.
 */
6504 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6506 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6507 struct ice_vsi_list_map_info *map_info;
6508 struct LIST_HEAD_TYPE *list_head;
6509 struct ice_adv_rule_info rinfo;
6510 struct ice_switch_info *sw;
6511 enum ice_status status;
6512 u16 vsi_list_id = 0;
6515 sw = hw->switch_info;
6516 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
/* Skip recipes that were never created or hold only legacy rules. */
6517 if (!sw->recp_list[rid].recp_created)
6519 if (!sw->recp_list[rid].adv_rule)
6521 list_head = &sw->recp_list[rid].filt_rules;
6523 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6524 ice_adv_fltr_mgmt_list_entry, list_entry) {
6525 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6529 rinfo = list_itr->rule_info;
6530 rinfo.sw_act.vsi_handle = vsi_handle;
6531 status = ice_rem_adv_rule(hw, list_itr->lkups,
6532 list_itr->lkups_cnt, &rinfo);
6542 * ice_replay_fltr - Replay all the filters stored by a specific list head
6543 * @hw: pointer to the hardware structure
6544 * @list_head: list for which filters needs to be replayed
6545 * @recp_id: Recipe ID for which rules need to be replayed
/* NOTE(review): sampled extraction — loop entry macro argument, the inner
 * while/for header around the per-VSI replay, and the final return are on
 * missing lines.
 *
 * Replays every filter stored under list_head for the given recipe. The
 * entries are first moved to a temporary local list so re-adding them does
 * not collide with the (now empty) book-keeping list; single-VSI non-VLAN
 * rules are re-added directly, while VSI-list rules are re-added one VSI at
 * a time by walking the saved vsi_map. The temporary list is freed at the
 * end via ice_rem_sw_rule_info().
 */
6547 static enum ice_status
6548 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6550 struct ice_fltr_mgmt_list_entry *itr;
6551 struct LIST_HEAD_TYPE l_head;
6552 enum ice_status status = ICE_SUCCESS;
6554 if (LIST_EMPTY(list_head))
6557 /* Move entries from the given list_head to a temporary l_head so that
6558 * they can be replayed. Otherwise when trying to re-add the same
6559 * filter, the function will return already exists
6561 LIST_REPLACE_INIT(list_head, &l_head);
6563 /* Mark the given list_head empty by reinitializing it so filters
6564 * could be added again by *handler
6566 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6568 struct ice_fltr_list_entry f_entry;
6570 f_entry.fltr_info = itr->fltr_info;
6571 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6572 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6573 if (status != ICE_SUCCESS)
6578 /* Add a filter per VSI separately */
/* Per-VSI replay: pop each handle from the saved bitmap, rebuild the
 * forwarding info for that VSI, and re-add (VLAN rules use the VLAN path).
 */
6583 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6585 if (!ice_is_vsi_valid(hw, vsi_handle))
6588 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6589 f_entry.fltr_info.vsi_handle = vsi_handle;
6590 f_entry.fltr_info.fwd_id.hw_vsi_id =
6591 ice_get_hw_vsi_num(hw, vsi_handle);
6592 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6593 if (recp_id == ICE_SW_LKUP_VLAN)
6594 status = ice_add_vlan_internal(hw, &f_entry);
6596 status = ice_add_rule_internal(hw, recp_id,
6598 if (status != ICE_SUCCESS)
6603 /* Clear the filter management list */
6604 ice_rem_sw_rule_info(hw, &l_head);
6609 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6610 * @hw: pointer to the hardware structure
6612 * NOTE: This function does not clean up partially added filters on error.
6613 * It is up to caller of the function to issue a reset or fail early.
/* NOTE(review): sampled extraction — the closing brace and final return are
 * beyond this view.
 *
 * Replays the filters of every recipe's book-keeping list; visible code
 * shows iteration stops on the first failing recipe (partial replay is not
 * rolled back, per the kernel-doc header above).
 */
6615 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6617 struct ice_switch_info *sw = hw->switch_info;
6618 enum ice_status status = ICE_SUCCESS;
6621 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6622 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6624 status = ice_replay_fltr(hw, i, head);
6625 if (status != ICE_SUCCESS)
6632 * ice_replay_vsi_fltr - Replay filters for requested VSI
6633 * @hw: pointer to the hardware structure
6634 * @vsi_handle: driver VSI handle
6635 * @recp_id: Recipe ID for which rules need to be replayed
6636 * @list_head: list for which filters need to be replayed
6638 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6639 * It is required to pass valid VSI handle.
/* NOTE(review): sampled extraction — the loop-entry macro argument and the
 * final return are on missing lines.
 *
 * Replays all filters of one recipe that involve the given VSI. Rules owned
 * solely by this VSI (non-VLAN) are re-added directly; rules where this VSI
 * is one member of a VSI list get its bit cleared from the saved map and
 * are re-added as a single-VSI forward so the add path rebuilds the list.
 * The source field is refreshed from the HW VSI number when src_id is
 * VSI-based.
 */
6641 static enum ice_status
6642 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6643 struct LIST_HEAD_TYPE *list_head)
6645 struct ice_fltr_mgmt_list_entry *itr;
6646 enum ice_status status = ICE_SUCCESS;
6649 if (LIST_EMPTY(list_head))
6651 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6653 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6655 struct ice_fltr_list_entry f_entry;
6657 f_entry.fltr_info = itr->fltr_info;
6658 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6659 itr->fltr_info.vsi_handle == vsi_handle) {
6660 /* update the src in case it is VSI num */
6661 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6662 f_entry.fltr_info.src = hw_vsi_id;
6663 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6664 if (status != ICE_SUCCESS)
/* VSI-list case: only replay entries whose saved map contains this VSI. */
6668 if (!itr->vsi_list_info ||
6669 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6671 /* Clearing it so that the logic can add it back */
6672 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6673 f_entry.fltr_info.vsi_handle = vsi_handle;
6674 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6675 /* update the src in case it is VSI num */
6676 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6677 f_entry.fltr_info.src = hw_vsi_id;
6678 if (recp_id == ICE_SW_LKUP_VLAN)
6679 status = ice_add_vlan_internal(hw, &f_entry);
6681 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6682 if (status != ICE_SUCCESS)
6690 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6691 * @hw: pointer to the hardware structure
6692 * @vsi_handle: driver VSI handle
6693 * @list_head: list for which filters need to be replayed
6695 * Replay the advanced rule for the given VSI.
/* NOTE(review): sampled extraction — loop-entry macro argument, the
 * post-add status check and final return are on missing lines.
 *
 * Replays every advanced rule on list_head whose stored switch action
 * targets the given VSI, re-programming each via ice_add_adv_rule(); the
 * added_entry result is collected but (per the visible code) not returned
 * to the caller.
 */
6697 static enum ice_status
6698 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6699 struct LIST_HEAD_TYPE *list_head)
6701 struct ice_rule_query_data added_entry = { 0 };
6702 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6703 enum ice_status status = ICE_SUCCESS;
6705 if (LIST_EMPTY(list_head))
6707 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6709 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6710 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only rules whose saved action targets this VSI are replayed. */
6712 if (vsi_handle != rinfo->sw_act.vsi_handle)
6714 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6723 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6724 * @hw: pointer to the hardware structure
6725 * @vsi_handle: driver VSI handle
6727 * Replays filters for requested VSI via vsi_handle.
/* NOTE(review): sampled extraction — closing brace and final return are
 * beyond this view.
 *
 * Replays all filters for one VSI: for each recipe, dispatches the saved
 * replay list to either the legacy per-recipe replay or the advanced-rule
 * replay depending on the recipe's adv_rule flag; stops on first failure.
 */
6729 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6731 struct ice_switch_info *sw = hw->switch_info;
6732 enum ice_status status;
6735 /* Update the recipes that were created */
6736 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6737 struct LIST_HEAD_TYPE *head;
6739 head = &sw->recp_list[i].filt_replay_rules;
6740 if (!sw->recp_list[i].adv_rule)
6741 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6743 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6744 if (status != ICE_SUCCESS)
6752 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6753 * @hw: pointer to the HW struct
6755 * Deletes the filter replay rules.
6757 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6759 struct ice_switch_info *sw = hw->switch_info;
6765 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6766 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6767 struct LIST_HEAD_TYPE *l_head;
6769 l_head = &sw->recp_list[i].filt_replay_rules;
6770 if (!sw->recp_list[i].adv_rule)
6771 ice_rem_sw_rule_info(hw, l_head);
6773 ice_rem_adv_rule_info(hw, l_head);