1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header defined below */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN ID is a 12-bit field, so 0xFFF is the largest legal value */
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
29 #define DUMMY_ETH_HDR_LEN 16
/* Template L2 header used as packet data in switch rules; see the comment
 * block above for the meaning of the hardcoded bytes.
 * NOTE(review): the initializer continues past this line in the full source
 * (SA and ether-type bytes) -- this listing is truncated here.
 */
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size in bytes of a switch-rule AQ element for a lookup RX/TX rule that
 * carries the dummy Ethernet header above as packet data: the flexible
 * pdata member is subtracted out and the real lkup_rx_tx payload plus
 * DUMMY_ETH_HDR_LEN added back. The trailing -1 presumably accounts for a
 * one-byte placeholder inside ice_sw_rule_lkup_rx_tx -- confirm against
 * that struct's definition.
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* As above, but for a lookup rule carrying no packet data */
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element holding n entries of the act member */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element holding n VSI entries */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* (type, offset) pair describing where each protocol header starts inside a
 * dummy packet template; lists are terminated by an ICE_PROTOCOL_LAST entry.
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Header offsets for dummy_gre_tcp_packet below */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
/* Template: outer MAC / IPv4 (protocol byte 0x2F = GRE) / NVGRE /
 * inner MAC / inner IPv4 / TCP. Zeroed fields are filled per filter rule.
 */
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
/* Header offsets and template for outer MAC / IPv4 / NVGRE / inner MAC /
 * inner IPv4 / UDP (same layout as the GRE+TCP template above).
 */
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00,
/* Header offsets and template for a UDP-tunneled (VXLAN/VXLAN-GPE, UDP dst
 * port 0x12b5 = 4789) packet with inner MAC / IPv4 / TCP.
 */
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
159 { ICE_VXLAN_GPE, 42 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
/* Header offsets and template for a UDP-tunneled (VXLAN, UDP dst port
 * 0x12b5 = 4789) packet with inner MAC / IPv4 / UDP.
 */
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
212 { ICE_VXLAN_GPE, 42 },
215 { ICE_UDP_ILOS, 84 },
216 { ICE_PROTOCOL_LAST, 0 },
220 u8 dummy_udp_tun_udp_packet[] = {
221 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x08, 0x00, /* ICE_ETYPE_OL 12 */
227 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
228 0x00, 0x01, 0x00, 0x00,
229 0x00, 0x11, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
234 0x00, 0x3a, 0x00, 0x00,
236 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
237 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
251 0x00, 0x08, 0x00, 0x00,
/* Header offsets and template for a plain (non-tunneled) MAC/IPv4/UDP
 * packet; padded with two trailing bytes for 4-byte alignment.
 */
255 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
257 { ICE_ETYPE_OL, 12 },
258 { ICE_IPV4_OFOS, 14 },
259 { ICE_UDP_ILOS, 34 },
260 { ICE_PROTOCOL_LAST, 0 },
264 dummy_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x08, 0x00, /* ICE_ETYPE_OL 12 */
271 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
272 0x00, 0x01, 0x00, 0x00,
273 0x00, 0x11, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
278 0x00, 0x08, 0x00, 0x00,
280 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets and template for a plain (non-tunneled) MAC/IPv4/TCP
 * packet; padded with two trailing bytes for 4-byte alignment.
 */
284 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
286 { ICE_ETYPE_OL, 12 },
287 { ICE_IPV4_OFOS, 14 },
289 { ICE_PROTOCOL_LAST, 0 },
293 dummy_tcp_packet[] = {
294 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
295 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00,
298 0x08, 0x00, /* ICE_ETYPE_OL 12 */
300 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
301 0x00, 0x01, 0x00, 0x00,
302 0x00, 0x06, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x50, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets and template for MAC / IPv6 (ether type 0x86DD) / TCP;
 * padded with two trailing bytes for 4-byte alignment.
 */
316 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
318 { ICE_ETYPE_OL, 12 },
319 { ICE_IPV6_OFOS, 14 },
321 { ICE_PROTOCOL_LAST, 0 },
325 dummy_tcp_ipv6_packet[] = {
326 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
332 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
333 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x50, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets and template for MAC / IPv6 (ether type 0x86DD) / UDP;
 * padded with two trailing bytes for 4-byte alignment.
 */
353 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
355 { ICE_ETYPE_OL, 12 },
356 { ICE_IPV6_OFOS, 14 },
357 { ICE_UDP_ILOS, 54 },
358 { ICE_PROTOCOL_LAST, 0 },
362 dummy_udp_ipv6_packet[] = {
363 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
364 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
367 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
381 0x00, 0x08, 0x00, 0x00,
383 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header offsets and template for MAC / IPv4 / UDP (dst port 0x0868 = 2152,
 * GTP-U) / GTP header with a PDU Session Container extension (type 0x85).
 */
387 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
389 { ICE_IPV4_OFOS, 14 },
392 { ICE_PROTOCOL_LAST, 0 },
396 dummy_udp_gtp_packet[] = {
397 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
402 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x11, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
409 0x00, 0x1c, 0x00, 0x00,
411 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
412 0x00, 0x00, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x85,
415 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
416 0x00, 0x00, 0x00, 0x00,
/* Header offsets and template for a PPPoE session packet: MAC, then ether
 * type 0x8864 (PPPoE session) at the ICE_VLAN_OFOS slot, PPPoE header with
 * PPP protocol 0x0021 (IPv4), followed by an IPv4/UDP PDU.
 */
420 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_VLAN_OFOS, 14},
424 { ICE_PROTOCOL_LAST, 0 },
428 dummy_pppoe_packet[] = {
429 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
436 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 18 */
437 0x00, 0x4e, 0x00, 0x21,
439 0x45, 0x00, 0x00, 0x30, /* PDU */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x11, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
446 /* this is a recipe to profile association bitmap */
447 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
448 ICE_MAX_NUM_PROFILES);
450 /* this is a profile to recipe association bitmap */
451 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
452 ICE_MAX_NUM_RECIPES);
/* Forward declaration: defined later in this file and used by
 * ice_get_recp_frm_fw() to refresh the two cached bitmaps above.
 */
454 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
457 * ice_collect_result_idx - copy result index values
458 * @buf: buffer that contains the result index
459 * @recp: the recipe struct to copy data into
461 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
462 struct ice_sw_recipe *recp)
/* If FW flagged this recipe element as producing a result index, record
 * that index (with the enable bit stripped) in the recipe's result-index
 * bitmap.
 */
464 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
465 ice_set_bit(buf->content.result_indx &
466 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
470 * ice_init_possible_res_bm - initialize possible result bitmap
471 * @pos_result_bm: pointer to the bitmap to initialize
473 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
/* Clear the bitmap, then set every bit position flagged in the
 * ICE_POSSIBLE_RES_IDX mask -- the FV word slots FW may use as result
 * indices rather than lookup words.
 */
477 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
479 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
480 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
481 ice_set_bit(bit, pos_result_bm);
485 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
486 * @hw: pointer to hardware structure
487 * @recps: struct that we need to populate
488 * @rid: recipe ID that we are populating
489 * @refresh_required: true if we should get recipe to profile mapping from FW
491 * This function is used to populate all the necessary entries into our
492 * bookkeeping so that we have a current list of all the recipes that are
493 * programmed in the firmware.
495 static enum ice_status
496 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
497 bool *refresh_required)
499 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
500 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
501 struct ice_aqc_recipe_data_elem *tmp;
502 u16 num_recps = ICE_MAX_NUM_RECIPES;
503 struct ice_prot_lkup_ext *lkup_exts;
504 u16 i, sub_recps, fv_word_idx = 0;
505 enum ice_status status;
/* result_bm accumulates every result index seen across the whole chain
 * of sub-recipes; possible_idx marks which FV slots can hold them.
 */
507 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
508 ice_init_possible_res_bm(possible_idx);
510 /* we need a buffer big enough to accommodate all the recipes */
511 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
512 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
514 return ICE_ERR_NO_MEMORY;
/* Seed the query with the requested recipe; FW returns the whole chain
 * and updates num_recps to the number of elements actually returned.
 */
516 tmp[0].recipe_indx = rid;
517 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
518 /* non-zero status meaning recipe doesn't exist */
522 /* Get recipe to profile map so that we can get the fv from lkups that
523 * we read for a recipe from FW. Since we want to minimize the number of
524 * times we make this FW call, just make one call and cache the copy
525 * until a new recipe is added. This operation is only required the
526 * first time to get the changes from FW. Then to search existing
527 * entries we don't need to update the cache again until another recipe
530 if (*refresh_required) {
531 ice_get_recp_to_prof_map(hw);
532 *refresh_required = false;
535 /* Start populating all the entries for recps[rid] based on lkups from
536 * firmware. Note that we are only creating the root recipe in our
539 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe element FW returned for this chain */
541 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
542 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
543 struct ice_recp_grp_entry *rg_entry;
544 u8 prof_id, idx, prot = 0;
548 rg_entry = (struct ice_recp_grp_entry *)
549 ice_malloc(hw, sizeof(*rg_entry));
551 status = ICE_ERR_NO_MEMORY;
/* idx is the FW recipe index of this particular sub-recipe */
555 idx = root_bufs.recipe_indx;
556 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
558 /* Mark all result indices in this chain */
559 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
560 ice_set_bit(root_bufs.content.result_indx &
561 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
563 /* get the first profile that is associated with rid */
564 prof_id = ice_find_first_bit(recipe_to_profile[idx],
565 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped here (i + 1 below) -- presumably it holds
 * recipe metadata rather than an FV word index; confirm against the
 * AQ recipe content layout.
 */
566 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
567 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
569 rg_entry->fv_idx[i] = lkup_indx;
570 rg_entry->fv_mask[i] =
571 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
573 /* If the recipe is a chained recipe then all its
574 * child recipe's result will have a result index.
575 * To fill fv_words we should not use those result
576 * index, we only need the protocol ids and offsets.
577 * We will skip all the fv_idx which stores result
578 * index in them. We also need to skip any fv_idx which
579 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
580 * valid offset value.
582 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
583 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
584 rg_entry->fv_idx[i] == 0)
/* Translate the FV index into a (protocol id, offset) pair via
 * the profile's field vector and record it for lookup matching.
 */
587 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
588 rg_entry->fv_idx[i], &prot, &off);
589 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
590 lkup_exts->fv_words[fv_word_idx].off = off;
593 /* populate rg_list with the data from the child entry of this
596 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
598 /* Propagate some data to the recipe database */
599 recps[idx].is_root = is_root;
600 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
601 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
602 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
603 recps[idx].chain_idx = root_bufs.content.result_indx &
604 ~ICE_AQ_RECIPE_RESULT_EN;
605 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
/* No result index produced by this sub-recipe */
607 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
613 /* Only do the following for root recipes entries */
614 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
615 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
616 recps[idx].root_rid = root_bufs.content.rid &
617 ~ICE_AQ_RECIPE_ID_IS_ROOT;
618 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
621 /* Complete initialization of the root recipe entry */
622 lkup_exts->n_val_words = fv_word_idx;
623 recps[rid].big_recp = (num_recps > 1);
624 recps[rid].n_grp_count = num_recps;
/* Keep a private copy of the raw FW buffers for later re-programming */
625 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
626 ice_memdup(hw, tmp, recps[rid].n_grp_count *
627 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
628 if (!recps[rid].root_buf)
631 /* Copy result indexes */
632 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
633 ICE_NONDMA_TO_NONDMA);
634 recps[rid].recp_created = true;
642 * ice_get_recp_to_prof_map - updates recipe to profile mapping
643 * @hw: pointer to hardware structure
645 * This function is used to populate recipe_to_profile matrix where index to
646 * this array is the recipe ID and the element is the mapping of which profiles
647 * is this recipe mapped to.
650 ice_get_recp_to_prof_map(struct ice_hw *hw)
652 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile and mirror the association into both cached
 * bitmaps: profile_to_recipe[i] directly, and recipe_to_profile[j] by
 * setting bit i for every recipe j the profile maps to.
 */
655 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
658 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
659 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Skip this profile if the AQ query fails */
660 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
662 ice_memcpy(profile_to_recipe[i], r_bitmap,
663 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
664 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
665 if (ice_is_bit_set(r_bitmap, j))
666 ice_set_bit(i, recipe_to_profile[j]);
671 * ice_init_def_sw_recp - initialize the recipe book keeping tables
672 * @hw: pointer to the HW struct
674 * Allocate memory for the entire recipe table and initialize the structures/
675 * entries corresponding to basic recipes.
677 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
679 struct ice_sw_recipe *recps;
/* One zeroed bookkeeping entry per possible recipe */
682 recps = (struct ice_sw_recipe *)
683 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
685 return ICE_ERR_NO_MEMORY;
/* Each entry starts as its own root with empty rule lists and a lock
 * guarding its filter-rule list.
 */
687 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
688 recps[i].root_rid = i;
689 INIT_LIST_HEAD(&recps[i].filt_rules);
690 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
691 INIT_LIST_HEAD(&recps[i].rg_list);
692 ice_init_lock(&recps[i].filt_rule_lock);
/* Ownership of the table transfers to hw->switch_info */
695 hw->switch_info->recp_list = recps;
701 * ice_aq_get_sw_cfg - get switch configuration
702 * @hw: pointer to the hardware structure
703 * @buf: pointer to the result buffer
704 * @buf_size: length of the buffer available for response
705 * @req_desc: pointer to requested descriptor
706 * @num_elems: pointer to number of elements
707 * @cd: pointer to command details structure or NULL
709 * Get switch configuration (0x0200) to be placed in 'buff'.
710 * This admin command returns information such as initial VSI/port number
711 * and switch ID it belongs to.
713 * NOTE: *req_desc is both an input/output parameter.
714 * The caller of this function first calls this function with *request_desc set
715 * to 0. If the response from f/w has *req_desc set to 0, all the switch
716 * configuration information has been returned; if non-zero (meaning not all
717 * the information was returned), the caller should call this function again
718 * with *req_desc set to the previous value returned by f/w to get the
719 * next block of switch configuration information.
721 * *num_elems is output only parameter. This reflects the number of elements
722 * in response buffer. The caller of this function to use *num_elems while
723 * parsing the response buffer.
725 static enum ice_status
726 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
727 u16 buf_size, u16 *req_desc, u16 *num_elems,
728 struct ice_sq_cd *cd)
730 struct ice_aqc_get_sw_cfg *cmd;
731 enum ice_status status;
732 struct ice_aq_desc desc;
734 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
735 cmd = &desc.params.get_sw_conf;
/* Pass the caller's continuation cookie to FW (0 on the first call) */
736 cmd->element = CPU_TO_LE16(*req_desc);
738 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Echo back FW's continuation cookie and the element count so the
 * caller can page through the remaining configuration.
 */
740 *req_desc = LE16_TO_CPU(cmd->element);
741 *num_elems = LE16_TO_CPU(cmd->num_elems);
748 * ice_alloc_sw - allocate resources specific to switch
749 * @hw: pointer to the HW struct
750 * @ena_stats: true to turn on VEB stats
751 * @shared_res: true for shared resource, false for dedicated resource
752 * @sw_id: switch ID returned
753 * @counter_id: VEB counter ID returned
755 * allocates switch resources (SWID and VEB counter) (0x0208)
758 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
761 struct ice_aqc_alloc_free_res_elem *sw_buf;
762 struct ice_aqc_res_elem *sw_ele;
763 enum ice_status status;
766 buf_len = sizeof(*sw_buf);
767 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
768 ice_malloc(hw, buf_len);
770 return ICE_ERR_NO_MEMORY;
772 /* Prepare buffer for switch ID.
773 * The number of resource entries in buffer is passed as 1 since only a
774 * single switch/VEB instance is allocated, and hence a single sw_id
777 sw_buf->num_elems = CPU_TO_LE16(1);
/* SWID resource, shared or dedicated per the caller's request */
779 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
780 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
781 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
783 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
784 ice_aqc_opc_alloc_res, NULL);
787 goto ice_alloc_sw_exit;
/* FW returns the allocated switch ID in the first element */
789 sw_ele = &sw_buf->elem[0];
790 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* The VEB counter below is only allocated when ena_stats is set --
 * the guarding condition is on a line elided from this listing.
 */
793 /* Prepare buffer for VEB Counter */
794 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
795 struct ice_aqc_alloc_free_res_elem *counter_buf;
796 struct ice_aqc_res_elem *counter_ele;
798 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
799 ice_malloc(hw, buf_len);
801 status = ICE_ERR_NO_MEMORY;
802 goto ice_alloc_sw_exit;
805 /* The number of resource entries in buffer is passed as 1 since
806 * only a single switch/VEB instance is allocated, and hence a
807 * single VEB counter is requested.
809 counter_buf->num_elems = CPU_TO_LE16(1);
810 counter_buf->res_type =
811 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
812 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
813 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* On failure, release the counter buffer before bailing out */
817 ice_free(hw, counter_buf);
818 goto ice_alloc_sw_exit;
820 counter_ele = &counter_buf->elem[0];
821 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
822 ice_free(hw, counter_buf);
/* Common exit: the switch-ID buffer is always freed */
826 ice_free(hw, sw_buf);
831 * ice_free_sw - free resources specific to switch
832 * @hw: pointer to the HW struct
833 * @sw_id: switch ID returned
834 * @counter_id: VEB counter ID returned
836 * free switch resources (SWID and VEB counter) (0x0209)
838 * NOTE: This function frees multiple resources. It continues
839 * releasing other resources even after it encounters error.
840 * The error code returned is the last error it encountered.
842 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
844 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
845 enum ice_status status, ret_status;
848 buf_len = sizeof(*sw_buf);
849 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
850 ice_malloc(hw, buf_len);
852 return ICE_ERR_NO_MEMORY;
854 /* Prepare buffer to free for switch ID res.
855 * The number of resource entries in buffer is passed as 1 since only a
856 * single switch/VEB instance is freed, and hence a single sw_id
859 sw_buf->num_elems = CPU_TO_LE16(1);
860 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
861 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* ret_status holds the SWID-free result; per the kdoc above, freeing
 * continues past errors and the last error is what gets returned.
 */
863 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
864 ice_aqc_opc_free_res, NULL);
867 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
869 /* Prepare buffer to free for VEB Counter resource */
870 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
871 ice_malloc(hw, buf_len);
/* Allocation failure for the second buffer: free the first and bail */
873 ice_free(hw, sw_buf);
874 return ICE_ERR_NO_MEMORY;
877 /* The number of resource entries in buffer is passed as 1 since only a
878 * single switch/VEB instance is freed, and hence a single VEB counter
881 counter_buf->num_elems = CPU_TO_LE16(1);
882 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
883 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
885 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
886 ice_aqc_opc_free_res, NULL);
888 ice_debug(hw, ICE_DBG_SW,
889 "VEB counter resource could not be freed\n");
893 ice_free(hw, counter_buf);
894 ice_free(hw, sw_buf);
900 * @hw: pointer to the HW struct
901 * @vsi_ctx: pointer to a VSI context struct
902 * @cd: pointer to command details structure or NULL
904 * Add a VSI context to the hardware (0x0210)
907 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
908 struct ice_sq_cd *cd)
910 struct ice_aqc_add_update_free_vsi_resp *res;
911 struct ice_aqc_add_get_update_free_vsi *cmd;
912 struct ice_aq_desc desc;
913 enum ice_status status;
915 cmd = &desc.params.vsi_cmd;
916 res = &desc.params.add_update_free_vsi_res;
918 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* If the caller pre-selected a VSI number (not allocating from the FW
 * pool), pass it along with the "number is valid" flag.
 */
920 if (!vsi_ctx->alloc_from_pool)
921 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
922 ICE_AQ_VSI_IS_VALID);
924 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* Command carries a read buffer (the VSI info section) */
926 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
928 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
929 sizeof(vsi_ctx->info), cd);
/* On success, capture the FW-assigned VSI number and pool statistics */
932 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
933 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
934 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
942 * @hw: pointer to the HW struct
943 * @vsi_ctx: pointer to a VSI context struct
944 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
945 * @cd: pointer to command details structure or NULL
947 * Free VSI context info from hardware (0x0213)
950 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
951 bool keep_vsi_alloc, struct ice_sq_cd *cd)
953 struct ice_aqc_add_update_free_vsi_resp *resp;
954 struct ice_aqc_add_get_update_free_vsi *cmd;
955 struct ice_aq_desc desc;
956 enum ice_status status;
958 cmd = &desc.params.vsi_cmd;
959 resp = &desc.params.add_update_free_vsi_res;
961 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
963 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Keep-allocation flag is set conditionally on keep_vsi_alloc; the
 * guarding "if" is on a line elided from this listing.
 */
965 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
967 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Refresh pool statistics from the response */
969 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
970 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
978 * @hw: pointer to the HW struct
979 * @vsi_ctx: pointer to a VSI context struct
980 * @cd: pointer to command details structure or NULL
982 * Update VSI context in the hardware (0x0211)
985 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
986 struct ice_sq_cd *cd)
988 struct ice_aqc_add_update_free_vsi_resp *resp;
989 struct ice_aqc_add_get_update_free_vsi *cmd;
990 struct ice_aq_desc desc;
991 enum ice_status status;
993 cmd = &desc.params.vsi_cmd;
994 resp = &desc.params.add_update_free_vsi_res;
996 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
998 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Command carries a read buffer (the updated VSI info section) */
1000 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1002 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1003 sizeof(vsi_ctx->info), cd);
/* Refresh pool statistics from the response */
1006 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1007 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1014 * ice_is_vsi_valid - check whether the VSI is valid or not
1015 * @hw: pointer to the HW struct
1016 * @vsi_handle: VSI handle
1018 * check whether the VSI is valid or not
1020 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* A handle is valid when it is in range and a context has been saved */
1022 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1026 * ice_get_hw_vsi_num - return the HW VSI number
1027 * @hw: pointer to the HW struct
1028 * @vsi_handle: VSI handle
1030 * return the HW VSI number
1031 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1033 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* Deliberately no range/NULL check -- per the kdoc caution above, the
 * caller must have validated the handle via ice_is_vsi_valid() first.
 */
1035 return hw->vsi_ctx[vsi_handle]->vsi_num;
1039 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1040 * @hw: pointer to the HW struct
1041 * @vsi_handle: VSI handle
1043 * return the VSI context entry for a given VSI handle
1045 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* NULL for out-of-range handles; may also be NULL when no context has
 * been saved for an in-range handle.
 */
1047 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1051 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1052 * @hw: pointer to the HW struct
1053 * @vsi_handle: VSI handle
1054 * @vsi: VSI context pointer
1056 * save the VSI context entry for a given VSI handle
1059 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* Stores the pointer only -- no copy is made of *vsi */
1061 hw->vsi_ctx[vsi_handle] = vsi;
1065 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1066 * @hw: pointer to the HW struct
1067 * @vsi_handle: VSI handle
1069 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1071 struct ice_vsi_ctx *vsi;
1074 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free each traffic class's LAN queue context array, if allocated,
 * and NULL the pointer to guard against reuse.
 */
1077 ice_for_each_traffic_class(i) {
1078 if (vsi->lan_q_ctx[i]) {
1079 ice_free(hw, vsi->lan_q_ctx[i]);
1080 vsi->lan_q_ctx[i] = NULL;
1086 * ice_clear_vsi_ctx - clear the VSI context entry
1087 * @hw: pointer to the HW struct
1088 * @vsi_handle: VSI handle
1090 * clear the VSI context entry
1092 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1094 struct ice_vsi_ctx *vsi;
1096 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release per-TC queue contexts before dropping the bookkeeping entry */
1098 ice_clear_vsi_q_ctx(hw, vsi_handle);
1100 hw->vsi_ctx[vsi_handle] = NULL;
1105 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1106 * @hw: pointer to the HW struct
1108 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* Clearing an unused handle is a harmless no-op, so walk them all */
1112 for (i = 0; i < ICE_MAX_VSI; i++)
1113 ice_clear_vsi_ctx(hw, i);
1117 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1118 * @hw: pointer to the HW struct
1119 * @vsi_handle: unique VSI handle provided by drivers
1120 * @vsi_ctx: pointer to a VSI context struct
1121 * @cd: pointer to command details structure or NULL
1123 * Add a VSI context to the hardware also add it into the VSI handle list.
1124 * If this function gets called after reset for existing VSIs then update
1125 * with the new HW VSI number in the corresponding VSI handle list entry.
1128 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1129 struct ice_sq_cd *cd)
1131 struct ice_vsi_ctx *tmp_vsi_ctx;
1132 enum ice_status status;
1134 if (vsi_handle >= ICE_MAX_VSI)
1135 return ICE_ERR_PARAM;
1136 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1139 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1141 /* Create a new VSI context */
1142 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1143 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed: undo the FW-side add before returning */
1145 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1146 return ICE_ERR_NO_MEMORY;
/* Save a private copy of the caller's context in the handle list */
1148 *tmp_vsi_ctx = *vsi_ctx;
1150 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1152 /* update with new HW VSI num */
1153 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1160 * ice_free_vsi- free VSI context from hardware and VSI handle list
1161 * @hw: pointer to the HW struct
1162 * @vsi_handle: unique VSI handle
1163 * @vsi_ctx: pointer to a VSI context struct
1164 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1165 * @cd: pointer to command details structure or NULL
1167 * Free VSI context info from hardware as well as from VSI handle list
1170 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1171 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1173 enum ice_status status;
1175 if (!ice_is_vsi_valid(hw, vsi_handle))
1176 return ICE_ERR_PARAM;
/* Translate the software handle to the HW VSI number for the AQ call */
1177 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1178 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the bookkeeping entry as well */
1180 ice_clear_vsi_ctx(hw, vsi_handle);
1186 * @hw: pointer to the HW struct
1187 * @vsi_handle: unique VSI handle
1188 * @vsi_ctx: pointer to a VSI context struct
1189 * @cd: pointer to command details structure or NULL
1191 * Update VSI context in the hardware
1194 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1195 struct ice_sq_cd *cd)
1197 if (!ice_is_vsi_valid(hw, vsi_handle))
1198 return ICE_ERR_PARAM;
1199 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1200 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1204 * ice_aq_get_vsi_params
1205 * @hw: pointer to the HW struct
1206 * @vsi_ctx: pointer to a VSI context struct
1207 * @cd: pointer to command details structure or NULL
1209 * Get VSI context info from hardware (0x0212)
1212 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1213 struct ice_sq_cd *cd)
1215 struct ice_aqc_add_get_update_free_vsi *cmd;
1216 struct ice_aqc_get_vsi_resp *resp;
1217 struct ice_aq_desc desc;
1218 enum ice_status status;
1220 cmd = &desc.params.vsi_cmd;
1221 resp = &desc.params.get_vsi_resp;
1223 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1225 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1227 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1228 sizeof(vsi_ctx->info), cd);
1230 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1232 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1233 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1240 * ice_aq_add_update_mir_rule - add/update a mirror rule
1241 * @hw: pointer to the HW struct
1242 * @rule_type: Rule Type
1243 * @dest_vsi: VSI number to which packets will be mirrored
1244 * @count: length of the list
1245 * @mr_buf: buffer for list of mirrored VSI numbers
1246 * @cd: pointer to command details structure or NULL
1249 * Add/Update Mirror Rule (0x260).
1252 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1253 u16 count, struct ice_mir_rule_buf *mr_buf,
1254 struct ice_sq_cd *cd, u16 *rule_id)
1256 struct ice_aqc_add_update_mir_rule *cmd;
1257 struct ice_aq_desc desc;
1258 enum ice_status status;
1259 __le16 *mr_list = NULL;
1262 switch (rule_type) {
1263 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1264 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1265 /* Make sure count and mr_buf are set for these rule_types */
1266 if (!(count && mr_buf))
1267 return ICE_ERR_PARAM;
1269 buf_size = count * sizeof(__le16);
1270 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1272 return ICE_ERR_NO_MEMORY;
1274 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1275 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1276 /* Make sure count and mr_buf are not set for these
1279 if (count || mr_buf)
1280 return ICE_ERR_PARAM;
1283 ice_debug(hw, ICE_DBG_SW,
1284 "Error due to unsupported rule_type %u\n", rule_type);
1285 return ICE_ERR_OUT_OF_RANGE;
1288 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1290 /* Pre-process 'mr_buf' items for add/update of virtual port
1291 * ingress/egress mirroring (but not physical port ingress/egress
1297 for (i = 0; i < count; i++) {
1300 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1302 /* Validate specified VSI number, make sure it is less
1303 * than ICE_MAX_VSI, if not return with error.
1305 if (id >= ICE_MAX_VSI) {
1306 ice_debug(hw, ICE_DBG_SW,
1307 "Error VSI index (%u) out-of-range\n",
1309 ice_free(hw, mr_list);
1310 return ICE_ERR_OUT_OF_RANGE;
1313 /* add VSI to mirror rule */
1316 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1317 else /* remove VSI from mirror rule */
1318 mr_list[i] = CPU_TO_LE16(id);
1322 cmd = &desc.params.add_update_rule;
1323 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1324 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1325 ICE_AQC_RULE_ID_VALID_M);
1326 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1327 cmd->num_entries = CPU_TO_LE16(count);
1328 cmd->dest = CPU_TO_LE16(dest_vsi);
1330 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1332 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1334 ice_free(hw, mr_list);
1340 * ice_aq_delete_mir_rule - delete a mirror rule
1341 * @hw: pointer to the HW struct
1342 * @rule_id: Mirror rule ID (to be deleted)
1343 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1344 * otherwise it is returned to the shared pool
1345 * @cd: pointer to command details structure or NULL
1347 * Delete Mirror Rule (0x261).
1350 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1351 struct ice_sq_cd *cd)
1353 struct ice_aqc_delete_mir_rule *cmd;
1354 struct ice_aq_desc desc;
1356 /* rule_id should be in the range 0...63 */
1357 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1358 return ICE_ERR_OUT_OF_RANGE;
1360 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1362 cmd = &desc.params.del_rule;
1363 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1364 cmd->rule_id = CPU_TO_LE16(rule_id);
1367 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1369 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1373 * ice_aq_alloc_free_vsi_list
1374 * @hw: pointer to the HW struct
1375 * @vsi_list_id: VSI list ID returned or used for lookup
1376 * @lkup_type: switch rule filter lookup type
1377 * @opc: switch rules population command type - pass in the command opcode
1379 * allocates or free a VSI list resource
1381 static enum ice_status
1382 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1383 enum ice_sw_lkup_type lkup_type,
1384 enum ice_adminq_opc opc)
1386 struct ice_aqc_alloc_free_res_elem *sw_buf;
1387 struct ice_aqc_res_elem *vsi_ele;
1388 enum ice_status status;
1391 buf_len = sizeof(*sw_buf);
1392 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1393 ice_malloc(hw, buf_len);
1395 return ICE_ERR_NO_MEMORY;
1396 sw_buf->num_elems = CPU_TO_LE16(1);
1398 if (lkup_type == ICE_SW_LKUP_MAC ||
1399 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1400 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1401 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1402 lkup_type == ICE_SW_LKUP_PROMISC ||
1403 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1404 lkup_type == ICE_SW_LKUP_LAST) {
1405 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1406 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1408 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1410 status = ICE_ERR_PARAM;
1411 goto ice_aq_alloc_free_vsi_list_exit;
1414 if (opc == ice_aqc_opc_free_res)
1415 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1417 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1419 goto ice_aq_alloc_free_vsi_list_exit;
1421 if (opc == ice_aqc_opc_alloc_res) {
1422 vsi_ele = &sw_buf->elem[0];
1423 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1426 ice_aq_alloc_free_vsi_list_exit:
1427 ice_free(hw, sw_buf);
1432 * ice_aq_set_storm_ctrl - Sets storm control configuration
1433 * @hw: pointer to the HW struct
1434 * @bcast_thresh: represents the upper threshold for broadcast storm control
1435 * @mcast_thresh: represents the upper threshold for multicast storm control
1436 * @ctl_bitmask: storm control control knobs
1438 * Sets the storm control configuration (0x0280)
1441 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1444 struct ice_aqc_storm_cfg *cmd;
1445 struct ice_aq_desc desc;
1447 cmd = &desc.params.storm_conf;
1449 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1451 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1452 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1453 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1455 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1459 * ice_aq_get_storm_ctrl - gets storm control configuration
1460 * @hw: pointer to the HW struct
1461 * @bcast_thresh: represents the upper threshold for broadcast storm control
1462 * @mcast_thresh: represents the upper threshold for multicast storm control
1463 * @ctl_bitmask: storm control control knobs
1465 * Gets the storm control configuration (0x0281)
1468 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1471 enum ice_status status;
1472 struct ice_aq_desc desc;
1474 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1476 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1478 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1481 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1484 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1487 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1494 * ice_aq_sw_rules - add/update/remove switch rules
1495 * @hw: pointer to the HW struct
1496 * @rule_list: pointer to switch rule population list
1497 * @rule_list_sz: total size of the rule list in bytes
1498 * @num_rules: number of switch rules in the rule_list
1499 * @opc: switch rules population command type - pass in the command opcode
1500 * @cd: pointer to command details structure or NULL
1502 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1504 static enum ice_status
1505 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1506 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1508 struct ice_aq_desc desc;
1510 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1512 if (opc != ice_aqc_opc_add_sw_rules &&
1513 opc != ice_aqc_opc_update_sw_rules &&
1514 opc != ice_aqc_opc_remove_sw_rules)
1515 return ICE_ERR_PARAM;
1517 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1519 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1520 desc.params.sw_rules.num_rules_fltr_entry_index =
1521 CPU_TO_LE16(num_rules);
1522 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1526 * ice_aq_add_recipe - add switch recipe
1527 * @hw: pointer to the HW struct
1528 * @s_recipe_list: pointer to switch rule population list
1529 * @num_recipes: number of switch recipes in the list
1530 * @cd: pointer to command details structure or NULL
1535 ice_aq_add_recipe(struct ice_hw *hw,
1536 struct ice_aqc_recipe_data_elem *s_recipe_list,
1537 u16 num_recipes, struct ice_sq_cd *cd)
1539 struct ice_aqc_add_get_recipe *cmd;
1540 struct ice_aq_desc desc;
1543 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1544 cmd = &desc.params.add_get_recipe;
1545 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1547 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1548 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1550 buf_size = num_recipes * sizeof(*s_recipe_list);
1552 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1556 * ice_aq_get_recipe - get switch recipe
1557 * @hw: pointer to the HW struct
1558 * @s_recipe_list: pointer to switch rule population list
1559 * @num_recipes: pointer to the number of recipes (input and output)
1560 * @recipe_root: root recipe number of recipe(s) to retrieve
1561 * @cd: pointer to command details structure or NULL
1565 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1566 * On output, *num_recipes will equal the number of entries returned in
1569 * The caller must supply enough space in s_recipe_list to hold all possible
1570 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1573 ice_aq_get_recipe(struct ice_hw *hw,
1574 struct ice_aqc_recipe_data_elem *s_recipe_list,
1575 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1577 struct ice_aqc_add_get_recipe *cmd;
1578 struct ice_aq_desc desc;
1579 enum ice_status status;
1582 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1583 return ICE_ERR_PARAM;
1585 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1586 cmd = &desc.params.add_get_recipe;
1587 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1589 cmd->return_index = CPU_TO_LE16(recipe_root);
1590 cmd->num_sub_recipes = 0;
1592 buf_size = *num_recipes * sizeof(*s_recipe_list);
1594 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1595 /* cppcheck-suppress constArgument */
1596 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1602 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1603 * @hw: pointer to the HW struct
1604 * @profile_id: package profile ID to associate the recipe with
1605 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1606 * @cd: pointer to command details structure or NULL
1607 * Recipe to profile association (0x0291)
1610 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1611 struct ice_sq_cd *cd)
1613 struct ice_aqc_recipe_to_profile *cmd;
1614 struct ice_aq_desc desc;
1616 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1617 cmd = &desc.params.recipe_to_profile;
1618 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1619 cmd->profile_id = CPU_TO_LE16(profile_id);
1620 /* Set the recipe ID bit in the bitmask to let the device know which
1621 * profile we are associating the recipe to
1623 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1624 ICE_NONDMA_TO_NONDMA);
1626 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1630 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1631 * @hw: pointer to the HW struct
1632 * @profile_id: package profile ID to associate the recipe with
1633 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1634 * @cd: pointer to command details structure or NULL
1635 * Associate profile ID with given recipe (0x0293)
1638 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1639 struct ice_sq_cd *cd)
1641 struct ice_aqc_recipe_to_profile *cmd;
1642 struct ice_aq_desc desc;
1643 enum ice_status status;
1645 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1646 cmd = &desc.params.recipe_to_profile;
1647 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1648 cmd->profile_id = CPU_TO_LE16(profile_id);
1650 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1652 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1653 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1659 * ice_alloc_recipe - add recipe resource
1660 * @hw: pointer to the hardware structure
1661 * @rid: recipe ID returned as response to AQ call
1663 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1665 struct ice_aqc_alloc_free_res_elem *sw_buf;
1666 enum ice_status status;
1669 buf_len = sizeof(*sw_buf);
1670 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1672 return ICE_ERR_NO_MEMORY;
1674 sw_buf->num_elems = CPU_TO_LE16(1);
1675 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1676 ICE_AQC_RES_TYPE_S) |
1677 ICE_AQC_RES_TYPE_FLAG_SHARED);
1678 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1679 ice_aqc_opc_alloc_res, NULL);
1681 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1682 ice_free(hw, sw_buf);
1687 /* ice_init_port_info - Initialize port_info with switch configuration data
1688 * @pi: pointer to port_info
1689 * @vsi_port_num: VSI number or port number
1690 * @type: Type of switch element (port or VSI)
1691 * @swid: switch ID of the switch the element is attached to
1692 * @pf_vf_num: PF or VF number
1693 * @is_vf: true if the element is a VF, false otherwise
1696 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1697 u16 swid, u16 pf_vf_num, bool is_vf)
1700 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1701 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1703 pi->pf_vf_num = pf_vf_num;
1705 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1706 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1709 ice_debug(pi->hw, ICE_DBG_SW,
1710 "incorrect VSI/port type received\n");
1715 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1716 * @hw: pointer to the hardware structure
1718 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1720 struct ice_aqc_get_sw_cfg_resp *rbuf;
1721 enum ice_status status;
1722 u16 num_total_ports;
1728 num_total_ports = 1;
1730 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1731 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1734 return ICE_ERR_NO_MEMORY;
1736 /* Multiple calls to ice_aq_get_sw_cfg may be required
1737 * to get all the switch configuration information. The need
1738 * for additional calls is indicated by ice_aq_get_sw_cfg
1739 * writing a non-zero value in req_desc
1742 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1743 &req_desc, &num_elems, NULL);
1748 for (i = 0; i < num_elems; i++) {
1749 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1750 u16 pf_vf_num, swid, vsi_port_num;
1754 ele = rbuf[i].elements;
1755 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1756 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1758 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1759 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1761 swid = LE16_TO_CPU(ele->swid);
1763 if (LE16_TO_CPU(ele->pf_vf_num) &
1764 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1767 type = LE16_TO_CPU(ele->vsi_port_num) >>
1768 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1771 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1772 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1773 if (j == num_total_ports) {
1774 ice_debug(hw, ICE_DBG_SW,
1775 "more ports than expected\n");
1776 status = ICE_ERR_CFG;
1779 ice_init_port_info(hw->port_info,
1780 vsi_port_num, type, swid,
1788 } while (req_desc && !status);
1791 ice_free(hw, (void *)rbuf);
1796 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1797 * @hw: pointer to the hardware structure
1798 * @fi: filter info structure to fill/update
1800 * This helper function populates the lb_en and lan_en elements of the provided
1801 * ice_fltr_info struct using the switch's type and characteristics of the
1802 * switch rule being configured.
1804 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1808 if ((fi->flag & ICE_FLTR_TX) &&
1809 (fi->fltr_act == ICE_FWD_TO_VSI ||
1810 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1811 fi->fltr_act == ICE_FWD_TO_Q ||
1812 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1813 /* Setting LB for prune actions will result in replicated
1814 * packets to the internal switch that will be dropped.
1816 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1819 /* Set lan_en to TRUE if
1820 * 1. The switch is a VEB AND
1822 * 2.1 The lookup is a directional lookup like ethertype,
1823 * promiscuous, ethertype-MAC, promiscuous-VLAN
1824 * and default-port OR
1825 * 2.2 The lookup is VLAN, OR
1826 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1827 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1831 * The switch is a VEPA.
1833 * In all other cases, the LAN enable has to be set to false.
1836 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1837 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1838 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1839 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1840 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1841 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1842 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1843 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1844 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1845 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1854 * ice_fill_sw_rule - Helper function to fill switch rule structure
1855 * @hw: pointer to the hardware structure
1856 * @f_info: entry containing packet forwarding information
1857 * @s_rule: switch rule structure to be filled in based on mac_entry
1858 * @opc: switch rules population command type - pass in the command opcode
1861 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1862 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1864 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1872 if (opc == ice_aqc_opc_remove_sw_rules) {
1873 s_rule->pdata.lkup_tx_rx.act = 0;
1874 s_rule->pdata.lkup_tx_rx.index =
1875 CPU_TO_LE16(f_info->fltr_rule_id);
1876 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1880 eth_hdr_sz = sizeof(dummy_eth_header);
1881 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1883 /* initialize the ether header with a dummy header */
1884 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1885 ice_fill_sw_info(hw, f_info);
1887 switch (f_info->fltr_act) {
1888 case ICE_FWD_TO_VSI:
1889 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1890 ICE_SINGLE_ACT_VSI_ID_M;
1891 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1892 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1893 ICE_SINGLE_ACT_VALID_BIT;
1895 case ICE_FWD_TO_VSI_LIST:
1896 act |= ICE_SINGLE_ACT_VSI_LIST;
1897 act |= (f_info->fwd_id.vsi_list_id <<
1898 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1899 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1900 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1901 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1902 ICE_SINGLE_ACT_VALID_BIT;
1905 act |= ICE_SINGLE_ACT_TO_Q;
1906 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1907 ICE_SINGLE_ACT_Q_INDEX_M;
1909 case ICE_DROP_PACKET:
1910 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1911 ICE_SINGLE_ACT_VALID_BIT;
1913 case ICE_FWD_TO_QGRP:
1914 q_rgn = f_info->qgrp_size > 0 ?
1915 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1916 act |= ICE_SINGLE_ACT_TO_Q;
1917 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1918 ICE_SINGLE_ACT_Q_INDEX_M;
1919 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1920 ICE_SINGLE_ACT_Q_REGION_M;
1927 act |= ICE_SINGLE_ACT_LB_ENABLE;
1929 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1931 switch (f_info->lkup_type) {
1932 case ICE_SW_LKUP_MAC:
1933 daddr = f_info->l_data.mac.mac_addr;
1935 case ICE_SW_LKUP_VLAN:
1936 vlan_id = f_info->l_data.vlan.vlan_id;
1937 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1938 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1939 act |= ICE_SINGLE_ACT_PRUNE;
1940 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1943 case ICE_SW_LKUP_ETHERTYPE_MAC:
1944 daddr = f_info->l_data.ethertype_mac.mac_addr;
1946 case ICE_SW_LKUP_ETHERTYPE:
1947 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1948 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1950 case ICE_SW_LKUP_MAC_VLAN:
1951 daddr = f_info->l_data.mac_vlan.mac_addr;
1952 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1954 case ICE_SW_LKUP_PROMISC_VLAN:
1955 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1957 case ICE_SW_LKUP_PROMISC:
1958 daddr = f_info->l_data.mac_vlan.mac_addr;
1964 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1965 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1966 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1968 /* Recipe set depending on lookup type */
1969 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1970 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1971 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1974 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1975 ICE_NONDMA_TO_NONDMA);
1977 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1978 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1979 *off = CPU_TO_BE16(vlan_id);
1982 /* Create the switch rule with the final dummy Ethernet header */
1983 if (opc != ice_aqc_opc_update_sw_rules)
1984 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1988 * ice_add_marker_act
1989 * @hw: pointer to the hardware structure
1990 * @m_ent: the management entry for which sw marker needs to be added
1991 * @sw_marker: sw marker to tag the Rx descriptor with
1992 * @l_id: large action resource ID
1994 * Create a large action to hold software marker and update the switch rule
1995 * entry pointed by m_ent with newly created large action
1997 static enum ice_status
1998 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1999 u16 sw_marker, u16 l_id)
2001 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2002 /* For software marker we need 3 large actions
2003 * 1. FWD action: FWD TO VSI or VSI LIST
2004 * 2. GENERIC VALUE action to hold the profile ID
2005 * 3. GENERIC VALUE action to hold the software marker ID
2007 const u16 num_lg_acts = 3;
2008 enum ice_status status;
2014 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2015 return ICE_ERR_PARAM;
2017 /* Create two back-to-back switch rules and submit them to the HW using
2018 * one memory buffer:
2022 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2023 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2024 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2026 return ICE_ERR_NO_MEMORY;
2028 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2030 /* Fill in the first switch rule i.e. large action */
2031 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2032 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2033 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2035 /* First action VSI forwarding or VSI list forwarding depending on how
2038 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2039 m_ent->fltr_info.fwd_id.hw_vsi_id;
2041 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2042 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2043 ICE_LG_ACT_VSI_LIST_ID_M;
2044 if (m_ent->vsi_count > 1)
2045 act |= ICE_LG_ACT_VSI_LIST;
2046 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2048 /* Second action descriptor type */
2049 act = ICE_LG_ACT_GENERIC;
2051 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2052 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2054 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2055 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2057 /* Third action Marker value */
2058 act |= ICE_LG_ACT_GENERIC;
2059 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2060 ICE_LG_ACT_GENERIC_VALUE_M;
2062 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2064 /* call the fill switch rule to fill the lookup Tx Rx structure */
2065 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2066 ice_aqc_opc_update_sw_rules);
2068 /* Update the action to point to the large action ID */
2069 rx_tx->pdata.lkup_tx_rx.act =
2070 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2071 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2072 ICE_SINGLE_ACT_PTR_VAL_M));
2074 /* Use the filter rule ID of the previously created rule with single
2075 * act. Once the update happens, hardware will treat this as large
2078 rx_tx->pdata.lkup_tx_rx.index =
2079 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2081 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2082 ice_aqc_opc_update_sw_rules, NULL);
2084 m_ent->lg_act_idx = l_id;
2085 m_ent->sw_marker_id = sw_marker;
2088 ice_free(hw, lg_act);
2093 * ice_add_counter_act - add/update filter rule with counter action
2094 * @hw: pointer to the hardware structure
2095 * @m_ent: the management entry for which counter needs to be added
2096 * @counter_id: VLAN counter ID returned as part of allocate resource
2097 * @l_id: large action resource ID
2099 static enum ice_status
2100 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2101 u16 counter_id, u16 l_id)
2103 struct ice_aqc_sw_rules_elem *lg_act;
2104 struct ice_aqc_sw_rules_elem *rx_tx;
2105 enum ice_status status;
2106 /* 2 actions will be added while adding a large action counter */
2107 const int num_acts = 2;
2114 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2115 return ICE_ERR_PARAM;
2117 /* Create two back-to-back switch rules and submit them to the HW using
2118 * one memory buffer:
2122 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2123 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2124 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2127 return ICE_ERR_NO_MEMORY;
2129 rx_tx = (struct ice_aqc_sw_rules_elem *)
2130 ((u8 *)lg_act + lg_act_size);
2132 /* Fill in the first switch rule i.e. large action */
2133 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2134 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2135 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2137 /* First action VSI forwarding or VSI list forwarding depending on how
2140 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2141 m_ent->fltr_info.fwd_id.hw_vsi_id;
2143 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2144 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2145 ICE_LG_ACT_VSI_LIST_ID_M;
2146 if (m_ent->vsi_count > 1)
2147 act |= ICE_LG_ACT_VSI_LIST;
2148 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2150 /* Second action counter ID */
2151 act = ICE_LG_ACT_STAT_COUNT;
2152 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2153 ICE_LG_ACT_STAT_COUNT_M;
2154 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2156 /* call the fill switch rule to fill the lookup Tx Rx structure */
2157 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2158 ice_aqc_opc_update_sw_rules);
2160 act = ICE_SINGLE_ACT_PTR;
2161 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2162 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2164 /* Use the filter rule ID of the previously created rule with single
2165 * act. Once the update happens, hardware will treat this as large
2168 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2169 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2171 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2172 ice_aqc_opc_update_sw_rules, NULL);
2174 m_ent->lg_act_idx = l_id;
2175 m_ent->counter_index = counter_id;
2178 ice_free(hw, lg_act);
2183 * ice_create_vsi_list_map
2184 * @hw: pointer to the hardware structure
2185 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2186 * @num_vsi: number of VSI handles in the array
2187 * @vsi_list_id: VSI list ID generated as part of allocate resource
2189 * Helper function to create a new entry of VSI list ID to VSI mapping
2190 * using the given VSI list ID
2192 static struct ice_vsi_list_map_info *
2193 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2196 struct ice_switch_info *sw = hw->switch_info;
2197 struct ice_vsi_list_map_info *v_map;
2200 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2205 v_map->vsi_list_id = vsi_list_id;
2207 for (i = 0; i < num_vsi; i++)
2208 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2210 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2215 * ice_update_vsi_list_rule
2216 * @hw: pointer to the hardware structure
2217 * @vsi_handle_arr: array of VSI handles to form a VSI list
2218 * @num_vsi: number of VSI handles in the array
2219 * @vsi_list_id: VSI list ID generated as part of allocate resource
2220 * @remove: Boolean value to indicate if this is a remove action
2221 * @opc: switch rules population command type - pass in the command opcode
2222 * @lkup_type: lookup type of the filter
2224 * Call AQ command to add a new switch rule or update existing switch rule
2225 * using the given VSI list ID
2227 static enum ice_status
2228 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2229 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2230 enum ice_sw_lkup_type lkup_type)
2232 struct ice_aqc_sw_rules_elem *s_rule;
2233 enum ice_status status;
2239 return ICE_ERR_PARAM;
2241 if (lkup_type == ICE_SW_LKUP_MAC ||
2242 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2243 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2244 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2245 lkup_type == ICE_SW_LKUP_PROMISC ||
2246 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2247 lkup_type == ICE_SW_LKUP_LAST)
2248 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2249 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2250 else if (lkup_type == ICE_SW_LKUP_VLAN)
2251 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2252 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2254 return ICE_ERR_PARAM;
2256 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2257 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2259 return ICE_ERR_NO_MEMORY;
2260 for (i = 0; i < num_vsi; i++) {
2261 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2262 status = ICE_ERR_PARAM;
2265 /* AQ call requires hw_vsi_id(s) */
2266 s_rule->pdata.vsi_list.vsi[i] =
2267 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2270 s_rule->type = CPU_TO_LE16(type);
2271 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2272 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2274 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2277 ice_free(hw, s_rule);
2282 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2283 * @hw: pointer to the HW struct
2284 * @vsi_handle_arr: array of VSI handles to form a VSI list
2285 * @num_vsi: number of VSI handles in the array
2286 * @vsi_list_id: stores the ID of the VSI list to be created
2287 * @lkup_type: switch rule filter's lookup type
2289 static enum ice_status
2290 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2291 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2293 enum ice_status status;
/* Step 1: allocate a VSI-list resource from FW; on success *vsi_list_id
 * holds the new list's ID. (The error-return line between the two calls is
 * not visible in this listing.)
 */
2295 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2296 ice_aqc_opc_alloc_res);
2300 /* Update the newly created VSI list to include the specified VSIs */
2301 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2302 *vsi_list_id, false,
2303 ice_aqc_opc_add_sw_rules, lkup_type);
2307 * ice_create_pkt_fwd_rule
2308 * @hw: pointer to the hardware structure
2309 * @f_entry: entry containing packet forwarding information
2311 * Create switch rule with given filter information and add an entry
2312 * to the corresponding filter management list to track this switch rule
2315 static enum ice_status
2316 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2317 struct ice_fltr_list_entry *f_entry)
2319 struct ice_fltr_mgmt_list_entry *fm_entry;
2320 struct ice_aqc_sw_rules_elem *s_rule;
2321 enum ice_sw_lkup_type l_type;
2322 struct ice_sw_recipe *recp;
2323 enum ice_status status;
/* Buffer sized for a rule carrying the dummy Ethernet header. */
2325 s_rule = (struct ice_aqc_sw_rules_elem *)
2326 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2328 return ICE_ERR_NO_MEMORY;
2329 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2330 ice_malloc(hw, sizeof(*fm_entry));
2332 status = ICE_ERR_NO_MEMORY;
2333 goto ice_create_pkt_fwd_rule_exit;
2336 fm_entry->fltr_info = f_entry->fltr_info;
2338 /* Initialize all the fields for the management entry */
2339 fm_entry->vsi_count = 1;
2340 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2341 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2342 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2344 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2345 ice_aqc_opc_add_sw_rules);
2347 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2348 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the tracking entry is freed; s_rule is freed at exit. */
2350 ice_free(hw, fm_entry);
2351 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the assigned rule index; record it in both the caller's entry
 * and the tracking entry.
 */
2354 f_entry->fltr_info.fltr_rule_id =
2355 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2356 fm_entry->fltr_info.fltr_rule_id =
2357 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2359 /* The book keeping entries will get removed when base driver
2360 * calls remove filter AQ command
2362 l_type = fm_entry->fltr_info.lkup_type;
2363 recp = &hw->switch_info->recp_list[l_type];
2364 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2366 ice_create_pkt_fwd_rule_exit:
2367 ice_free(hw, s_rule);
2372 * ice_update_pkt_fwd_rule
2373 * @hw: pointer to the hardware structure
2374 * @f_info: filter information for switch rule
2376 * Call AQ command to update a previously created switch rule with a
/* (Continuation of this comment is not visible in the listing — presumably
 * "...new VSI list ID"; confirm upstream.)
 */
2379 static enum ice_status
2380 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2382 struct ice_aqc_sw_rules_elem *s_rule;
2383 enum ice_status status;
2385 s_rule = (struct ice_aqc_sw_rules_elem *)
2386 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2388 return ICE_ERR_NO_MEMORY;
2390 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Updates address an existing rule, so the FW-assigned rule ID must be
 * placed in the element's index field.
 */
2392 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2394 /* Update switch rule with new rule set to forward VSI list */
2395 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2396 ice_aqc_opc_update_sw_rules, NULL);
2398 ice_free(hw, s_rule);
2403 * ice_update_sw_rule_bridge_mode
2404 * @hw: pointer to the HW struct
2406 * Updates unicast switch filter rules based on VEB/VEPA mode
2408 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2410 struct ice_switch_info *sw = hw->switch_info;
2411 struct ice_fltr_mgmt_list_entry *fm_entry;
2412 enum ice_status status = ICE_SUCCESS;
2413 struct LIST_HEAD_TYPE *rule_head;
2414 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2416 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2417 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Walk every tracked MAC rule under the list lock and re-program the
 * matching ones via ice_update_pkt_fwd_rule().
 */
2419 ice_acquire_lock(rule_lock);
2420 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2422 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2423 u8 *addr = fi->l_data.mac.mac_addr;
2425 /* Update unicast Tx rules to reflect the selected
2428 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2429 (fi->fltr_act == ICE_FWD_TO_VSI ||
2430 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2431 fi->fltr_act == ICE_FWD_TO_Q ||
2432 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2433 status = ice_update_pkt_fwd_rule(hw, fi);
2439 ice_release_lock(rule_lock);
2445 * ice_add_update_vsi_list
2446 * @hw: pointer to the hardware structure
2447 * @m_entry: pointer to current filter management list entry
2448 * @cur_fltr: filter information from the book keeping entry
2449 * @new_fltr: filter information with the new VSI to be added
2451 * Call AQ command to add or update previously created VSI list with new VSI.
2453 * Helper function to do book keeping associated with adding filter information
2454 * The algorithm to do the book keeping is described below :
2455 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2456 * if only one VSI has been added till now
2457 * Allocate a new VSI list and add two VSIs
2458 * to this list using switch rule command
2459 * Update the previously created switch rule with the
2460 * newly created VSI list ID
2461 * if a VSI list was previously created
2462 * Add the new VSI to the previously created VSI list set
2463 * using the update switch rule command
2465 static enum ice_status
2466 ice_add_update_vsi_list(struct ice_hw *hw,
2467 struct ice_fltr_mgmt_list_entry *m_entry,
2468 struct ice_fltr_info *cur_fltr,
2469 struct ice_fltr_info *new_fltr)
2471 enum ice_status status = ICE_SUCCESS;
2472 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding cannot be combined into a VSI list. */
2474 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2475 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2476 return ICE_ERR_NOT_IMPL;
2478 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2479 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2480 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2481 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2482 return ICE_ERR_NOT_IMPL;
2484 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2485 /* Only one entry existed in the mapping and it was not already
2486 * a part of a VSI list. So, create a VSI list with the old and
2489 struct ice_fltr_info tmp_fltr;
2490 u16 vsi_handle_arr[2];
2492 /* A rule already exists with the new VSI being added */
2493 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2494 return ICE_ERR_ALREADY_EXISTS;
2496 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2497 vsi_handle_arr[1] = new_fltr->vsi_handle;
2498 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2500 new_fltr->lkup_type);
2504 tmp_fltr = *new_fltr;
2505 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2506 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2507 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2508 /* Update the previous switch rule of "MAC forward to VSI" to
2509 * "MAC fwd to VSI list"
2511 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the tracked filter now forwards to the new list. */
2515 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2516 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2517 m_entry->vsi_list_info =
2518 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2521 /* If this entry was large action then the large action needs
2522 * to be updated to point to FWD to VSI list
2524 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2526 ice_add_marker_act(hw, m_entry,
2527 m_entry->sw_marker_id,
2528 m_entry->lg_act_idx);
/* else branch: a VSI list already exists — just append the new VSI. */
2530 u16 vsi_handle = new_fltr->vsi_handle;
2531 enum ice_adminq_opc opcode;
2533 if (!m_entry->vsi_list_info)
2536 /* A rule already exists with the new VSI being added */
2537 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2540 /* Update the previously created VSI list set with
2541 * the new VSI ID passed in
2543 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2544 opcode = ice_aqc_opc_update_sw_rules;
2546 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2547 vsi_list_id, false, opcode,
2548 new_fltr->lkup_type);
2549 /* update VSI list mapping info with new VSI ID */
2551 ice_set_bit(vsi_handle,
2552 m_entry->vsi_list_info->vsi_map);
/* Success path increments the subscriber count for this filter. */
2555 m_entry->vsi_count++;
2560 * ice_find_rule_entry - Search a rule entry
2561 * @hw: pointer to the hardware structure
2562 * @recp_id: lookup type for which the specified rule needs to be searched
2563 * @f_info: rule information
2565 * Helper function to search for a given rule entry
2566 * Returns pointer to entry storing the rule if found
2568 static struct ice_fltr_mgmt_list_entry *
2569 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2571 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2572 struct ice_switch_info *sw = hw->switch_info;
2573 struct LIST_HEAD_TYPE *list_head;
2575 list_head = &sw->recp_list[recp_id].filt_rules;
/* A rule matches when both its lookup data (l_data) and its flag are
 * identical to the requested filter.
 */
2576 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2578 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2579 sizeof(f_info->l_data)) &&
2580 f_info->flag == list_itr->fltr_info.flag) {
2589 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2590 * @hw: pointer to the hardware structure
2591 * @recp_id: lookup type for which VSI lists needs to be searched
2592 * @vsi_handle: VSI handle to be found in VSI list
2593 * @vsi_list_id: VSI list ID found containing vsi_handle
2595 * Helper function to search a VSI list with single entry containing given VSI
2596 * handle element. This can be extended further to search VSI list with more
2597 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2599 static struct ice_vsi_list_map_info *
2600 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2603 struct ice_vsi_list_map_info *map_info = NULL;
2604 struct ice_switch_info *sw = hw->switch_info;
2605 struct LIST_HEAD_TYPE *list_head;
2607 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes use a different management-entry type, so the
 * traversal is duplicated for the two list-entry layouts.
 */
2608 if (sw->recp_list[recp_id].adv_rule) {
2609 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2611 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2612 ice_adv_fltr_mgmt_list_entry,
2614 if (list_itr->vsi_list_info) {
2615 map_info = list_itr->vsi_list_info;
2616 if (ice_is_bit_set(map_info->vsi_map,
2618 *vsi_list_id = map_info->vsi_list_id;
/* Non-advanced path: only consider entries backed by a VSI list whose
 * current membership count is exactly 1 (see function comment above).
 */
2624 struct ice_fltr_mgmt_list_entry *list_itr;
2626 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2627 ice_fltr_mgmt_list_entry,
2629 if (list_itr->vsi_count == 1 &&
2630 list_itr->vsi_list_info) {
2631 map_info = list_itr->vsi_list_info;
2632 if (ice_is_bit_set(map_info->vsi_map,
2634 *vsi_list_id = map_info->vsi_list_id;
2644 * ice_add_rule_internal - add rule for a given lookup type
2645 * @hw: pointer to the hardware structure
2646 * @recp_id: lookup type (recipe ID) for which rule has to be added
2647 * @f_entry: structure containing MAC forwarding information
2649 * Adds or updates the rule lists for a given recipe
2651 static enum ice_status
2652 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2653 struct ice_fltr_list_entry *f_entry)
2655 struct ice_switch_info *sw = hw->switch_info;
2656 struct ice_fltr_info *new_fltr, *cur_fltr;
2657 struct ice_fltr_mgmt_list_entry *m_entry;
2658 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2659 enum ice_status status = ICE_SUCCESS;
2661 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2662 return ICE_ERR_PARAM;
2664 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2665 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2666 f_entry->fltr_info.fwd_id.hw_vsi_id =
2667 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2669 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2671 ice_acquire_lock(rule_lock);
2672 new_fltr = &f_entry->fltr_info;
/* Source field: lport for Rx filters, HW VSI number for Tx filters. */
2673 if (new_fltr->flag & ICE_FLTR_RX)
2674 new_fltr->src = hw->port_info->lport;
2675 else if (new_fltr->flag & ICE_FLTR_TX)
2677 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* First matching rule creates a fresh forwarding rule; subsequent VSIs
 * subscribing to the same filter go through the VSI-list update path.
 */
2679 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2681 status = ice_create_pkt_fwd_rule(hw, f_entry);
2682 goto exit_add_rule_internal;
2685 cur_fltr = &m_entry->fltr_info;
2686 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2688 exit_add_rule_internal:
2689 ice_release_lock(rule_lock);
2694 * ice_remove_vsi_list_rule
2695 * @hw: pointer to the hardware structure
2696 * @vsi_list_id: VSI list ID generated as part of allocate resource
2697 * @lkup_type: switch rule filter lookup type
2699 * The VSI list should be emptied before this function is called to remove the
/* (Comment continuation not visible in this listing — presumably
 * "...VSI list".)
 */
2702 static enum ice_status
2703 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2704 enum ice_sw_lkup_type lkup_type)
2706 struct ice_aqc_sw_rules_elem *s_rule;
2707 enum ice_status status;
/* Size for zero member VSIs — only the list header is needed. */
2710 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2711 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2713 return ICE_ERR_NO_MEMORY;
2715 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2716 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2718 /* Free the vsi_list resource that we allocated. It is assumed that the
2719 * list is empty at this point.
2721 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2722 ice_aqc_opc_free_res);
2724 ice_free(hw, s_rule);
2729 * ice_rem_update_vsi_list
2730 * @hw: pointer to the hardware structure
2731 * @vsi_handle: VSI handle of the VSI to remove
2732 * @fm_list: filter management entry for which the VSI list management needs to
2735 static enum ice_status
2736 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2737 struct ice_fltr_mgmt_list_entry *fm_list)
2739 enum ice_sw_lkup_type lkup_type;
2740 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for filters currently forwarding to a VSI list. */
2743 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2744 fm_list->vsi_count == 0)
2745 return ICE_ERR_PARAM;
2747 /* A rule with the VSI being removed does not exist */
2748 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2749 return ICE_ERR_DOES_NOT_EXIST;
2751 lkup_type = fm_list->fltr_info.lkup_type;
2752 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove=true clears this VSI from the hardware VSI list. */
2753 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2754 ice_aqc_opc_update_sw_rules,
2759 fm_list->vsi_count--;
2760 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Non-VLAN filters left with a single subscriber are converted back to a
 * direct FWD_TO_VSI rule targeting the one remaining VSI.
 */
2762 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2763 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2764 struct ice_vsi_list_map_info *vsi_list_info =
2765 fm_list->vsi_list_info;
2768 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2770 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2771 return ICE_ERR_OUT_OF_RANGE;
2773 /* Make sure VSI list is empty before removing it below */
2774 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2776 ice_aqc_opc_update_sw_rules,
2781 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2782 tmp_fltr_info.fwd_id.hw_vsi_id =
2783 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2784 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2785 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2787 ice_debug(hw, ICE_DBG_SW,
2788 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2789 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2793 fm_list->fltr_info = tmp_fltr_info;
/* Drop the (now unused) VSI list: non-VLAN lists at count 1 after the
 * conversion above, VLAN prune lists only when fully empty.
 */
2796 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2797 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2798 struct ice_vsi_list_map_info *vsi_list_info =
2799 fm_list->vsi_list_info;
2801 /* Remove the VSI list since it is no longer used */
2802 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2804 ice_debug(hw, ICE_DBG_SW,
2805 "Failed to remove VSI list %d, error %d\n",
2806 vsi_list_id, status);
2810 LIST_DEL(&vsi_list_info->list_entry);
2811 ice_free(hw, vsi_list_info);
2812 fm_list->vsi_list_info = NULL;
2819 * ice_remove_rule_internal - Remove a filter rule of a given type
2821 * @hw: pointer to the hardware structure
2822 * @recp_id: recipe ID for which the rule needs to removed
2823 * @f_entry: rule entry containing filter information
2825 static enum ice_status
2826 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2827 struct ice_fltr_list_entry *f_entry)
2829 struct ice_switch_info *sw = hw->switch_info;
2830 struct ice_fltr_mgmt_list_entry *list_elem;
2831 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2832 enum ice_status status = ICE_SUCCESS;
2833 bool remove_rule = false;
2836 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2837 return ICE_ERR_PARAM;
2838 f_entry->fltr_info.fwd_id.hw_vsi_id =
2839 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2841 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2842 ice_acquire_lock(rule_lock);
2843 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2845 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the hardware rule itself must be removed, or whether it
 * is enough to update/detach from a shared VSI list.
 */
2849 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2851 } else if (!list_elem->vsi_list_info) {
2852 status = ICE_ERR_DOES_NOT_EXIST;
2854 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2855 /* a ref_cnt > 1 indicates that the vsi_list is being
2856 * shared by multiple rules. Decrement the ref_cnt and
2857 * remove this rule, but do not modify the list, as it
2858 * is in-use by other rules.
2860 list_elem->vsi_list_info->ref_cnt--;
2863 /* a ref_cnt of 1 indicates the vsi_list is only used
2864 * by one rule. However, the original removal request is only
2865 * for a single VSI. Update the vsi_list first, and only
2866 * remove the rule if there are no further VSIs in this list.
2868 vsi_handle = f_entry->fltr_info.vsi_handle;
2869 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2872 /* if VSI count goes to zero after updating the VSI list */
2873 if (list_elem->vsi_count == 0)
2878 /* Remove the lookup rule */
2879 struct ice_aqc_sw_rules_elem *s_rule;
2881 s_rule = (struct ice_aqc_sw_rules_elem *)
2882 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2884 status = ICE_ERR_NO_MEMORY;
2888 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2889 ice_aqc_opc_remove_sw_rules);
2891 status = ice_aq_sw_rules(hw, s_rule,
2892 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2893 ice_aqc_opc_remove_sw_rules, NULL);
2895 /* Remove a book keeping from the list */
2896 ice_free(hw, s_rule);
2901 LIST_DEL(&list_elem->list_entry);
2902 ice_free(hw, list_elem);
2905 ice_release_lock(rule_lock);
2910 * ice_aq_get_res_alloc - get allocated resources
2911 * @hw: pointer to the HW struct
2912 * @num_entries: pointer to u16 to store the number of resource entries returned
2913 * @buf: pointer to user-supplied buffer
2914 * @buf_size: size of buff
2915 * @cd: pointer to command details structure or NULL
2917 * The user-supplied buffer must be large enough to store the resource
2918 * information for all resource types. Each resource type is an
2919 * ice_aqc_get_res_resp_data_elem structure.
2922 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2923 u16 buf_size, struct ice_sq_cd *cd)
2925 struct ice_aqc_get_res_alloc *resp;
2926 enum ice_status status;
2927 struct ice_aq_desc desc;
/* Guard not visible in listing — presumably a NULL-buffer check. */
2930 return ICE_ERR_BAD_PTR;
2932 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2933 return ICE_ERR_INVAL_SIZE;
2935 resp = &desc.params.get_res;
2937 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2938 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled when the caller asked for it. */
2940 if (!status && num_entries)
2941 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2947 * ice_aq_get_res_descs - get allocated resource descriptors
2948 * @hw: pointer to the hardware structure
2949 * @num_entries: number of resource entries in buffer
2950 * @buf: Indirect buffer to hold data parameters and response
2951 * @buf_size: size of buffer for indirect commands
2952 * @res_type: resource type
2953 * @res_shared: is resource shared
2954 * @desc_id: input - first desc ID to start; output - next desc ID
2955 * @cd: pointer to command details structure or NULL
2958 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2959 struct ice_aqc_get_allocd_res_desc_resp *buf,
2960 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2961 struct ice_sq_cd *cd)
2963 struct ice_aqc_get_allocd_res_desc *cmd;
2964 struct ice_aq_desc desc;
2965 enum ice_status status;
2967 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2969 cmd = &desc.params.get_res_desc;
/* Guard not visible in listing — presumably a NULL-buf check. */
2972 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements. */
2974 if (buf_size != (num_entries * sizeof(*buf)))
2975 return ICE_ERR_PARAM;
2977 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2979 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2980 ICE_AQC_RES_TYPE_M) | (res_shared ?
2981 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2982 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2984 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is in/out: FW reports the next descriptor ID for paging. */
2986 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2992 * ice_add_mac - Add a MAC address based filter rule
2993 * @hw: pointer to the hardware structure
2994 * @m_list: list of MAC addresses and forwarding information
2996 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2997 * multiple unicast addresses, the function assumes that all the
2998 * addresses are unique in a given add_mac call. It doesn't
2999 * check for duplicates in this case, removing duplicates from a given
3000 * list should be taken care of in the caller of this function.
3003 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3005 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3006 struct ice_fltr_list_entry *m_list_itr;
3007 struct LIST_HEAD_TYPE *rule_head;
3008 u16 elem_sent, total_elem_left;
3009 struct ice_switch_info *sw;
3010 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3011 enum ice_status status = ICE_SUCCESS;
3012 u16 num_unicast = 0;
3016 return ICE_ERR_PARAM;
3018 sw = hw->switch_info;
3019 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry and route multicast (or shared-unicast)
 * addresses through the generic per-rule add path; plain unicast entries
 * are counted here and batched through the AQ below.
 */
3020 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3022 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3026 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3027 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3028 if (!ice_is_vsi_valid(hw, vsi_handle))
3029 return ICE_ERR_PARAM;
3030 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3031 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3032 /* update the src in case it is VSI num */
3033 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3034 return ICE_ERR_PARAM;
3035 m_list_itr->fltr_info.src = hw_vsi_id;
3036 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3037 IS_ZERO_ETHER_ADDR(add))
3038 return ICE_ERR_PARAM;
3039 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3040 /* Don't overwrite the unicast address */
3041 ice_acquire_lock(rule_lock);
3042 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3043 &m_list_itr->fltr_info)) {
3044 ice_release_lock(rule_lock);
3045 return ICE_ERR_ALREADY_EXISTS;
3047 ice_release_lock(rule_lock);
3049 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3050 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3051 m_list_itr->status =
3052 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3054 if (m_list_itr->status)
3055 return m_list_itr->status;
3059 ice_acquire_lock(rule_lock);
3060 /* Exit if no suitable entries were found for adding bulk switch rule */
3062 status = ICE_SUCCESS;
3063 goto ice_add_mac_exit;
3066 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3068 /* Allocate switch rule buffer for the bulk update for unicast */
3069 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3070 s_rule = (struct ice_aqc_sw_rules_elem *)
3071 ice_calloc(hw, num_unicast, s_rule_size);
3073 status = ICE_ERR_NO_MEMORY;
3074 goto ice_add_mac_exit;
/* Pass 2: serialize one rule element per unicast entry into the bulk
 * buffer; r_iter walks the buffer in s_rule_size strides.
 */
3078 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3080 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3081 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3083 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3084 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3085 ice_aqc_opc_add_sw_rules);
3086 r_iter = (struct ice_aqc_sw_rules_elem *)
3087 ((u8 *)r_iter + s_rule_size);
3091 /* Call AQ bulk switch rule update for all unicast addresses */
3093 /* Call AQ switch rule in AQ_MAX chunk */
3094 for (total_elem_left = num_unicast; total_elem_left > 0;
3095 total_elem_left -= elem_sent) {
3096 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size is capped by the max AQ buffer length. */
3098 elem_sent = min(total_elem_left,
3099 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3100 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3101 elem_sent, ice_aqc_opc_add_sw_rules,
3104 goto ice_add_mac_exit;
3105 r_iter = (struct ice_aqc_sw_rules_elem *)
3106 ((u8 *)r_iter + (elem_sent * s_rule_size));
3109 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: walk the buffer again in lock-step with the input list, copy the
 * FW-assigned rule IDs back, and create a tracking entry per unicast rule.
 */
3111 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3113 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3114 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3115 struct ice_fltr_mgmt_list_entry *fm_entry;
3117 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3118 f_info->fltr_rule_id =
3119 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3120 f_info->fltr_act = ICE_FWD_TO_VSI;
3121 /* Create an entry to track this MAC address */
3122 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3123 ice_malloc(hw, sizeof(*fm_entry));
3125 status = ICE_ERR_NO_MEMORY;
3126 goto ice_add_mac_exit;
3128 fm_entry->fltr_info = *f_info;
3129 fm_entry->vsi_count = 1;
3130 /* The book keeping entries will get removed when
3131 * base driver calls remove filter AQ command
3134 LIST_ADD(&fm_entry->list_entry, rule_head);
3135 r_iter = (struct ice_aqc_sw_rules_elem *)
3136 ((u8 *)r_iter + s_rule_size);
3141 ice_release_lock(rule_lock);
3143 ice_free(hw, s_rule);
3148 * ice_add_vlan_internal - Add one VLAN based filter rule
3149 * @hw: pointer to the hardware structure
3150 * @f_entry: filter entry containing one VLAN information
3152 static enum ice_status
3153 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3155 struct ice_switch_info *sw = hw->switch_info;
3156 struct ice_fltr_mgmt_list_entry *v_list_itr;
3157 struct ice_fltr_info *new_fltr, *cur_fltr;
3158 enum ice_sw_lkup_type lkup_type;
3159 u16 vsi_list_id = 0, vsi_handle;
3160 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3161 enum ice_status status = ICE_SUCCESS;
3163 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3164 return ICE_ERR_PARAM;
3166 f_entry->fltr_info.fwd_id.hw_vsi_id =
3167 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3168 new_fltr = &f_entry->fltr_info;
3170 /* VLAN ID should only be 12 bits */
3171 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3172 return ICE_ERR_PARAM;
3174 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3175 return ICE_ERR_PARAM;
3177 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3178 lkup_type = new_fltr->lkup_type;
3179 vsi_handle = new_fltr->vsi_handle;
3180 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3181 ice_acquire_lock(rule_lock);
3182 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* Case 1 (rule not found): create a new VLAN rule, reusing an existing
 * single-VSI list where possible, else allocating a fresh one.
 */
3184 struct ice_vsi_list_map_info *map_info = NULL;
3186 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3187 /* All VLAN pruning rules use a VSI list. Check if
3188 * there is already a VSI list containing VSI that we
3189 * want to add. If found, use the same vsi_list_id for
3190 * this new VLAN rule or else create a new list.
3192 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3196 status = ice_create_vsi_list_rule(hw,
3204 /* Convert the action to forwarding to a VSI list. */
3205 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3206 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3209 status = ice_create_pkt_fwd_rule(hw, f_entry);
3211 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3214 status = ICE_ERR_DOES_NOT_EXIST;
3217 /* reuse VSI list for new rule and increment ref_cnt */
3219 v_list_itr->vsi_list_info = map_info;
3220 map_info->ref_cnt++;
3222 v_list_itr->vsi_list_info =
3223 ice_create_vsi_list_map(hw, &vsi_handle,
3227 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3228 /* Update existing VSI list to add new VSI ID only if it used
/* (Comment continuation not visible — presumably "...by one rule".) */
3231 cur_fltr = &v_list_itr->fltr_info;
3232 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3235 /* If VLAN rule exists and VSI list being used by this rule is
3236 * referenced by more than 1 VLAN rule. Then create a new VSI
3237 * list appending previous VSI with new VSI and update existing
3238 * VLAN rule to point to new VSI list ID
3240 struct ice_fltr_info tmp_fltr;
3241 u16 vsi_handle_arr[2];
3244 /* Current implementation only supports reusing VSI list with
3245 * one VSI count. We should never hit below condition
3247 if (v_list_itr->vsi_count > 1 &&
3248 v_list_itr->vsi_list_info->ref_cnt > 1) {
3249 ice_debug(hw, ICE_DBG_SW,
3250 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3251 status = ICE_ERR_CFG;
3256 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3259 /* A rule already exists with the new VSI being added */
3260 if (cur_handle == vsi_handle) {
3261 status = ICE_ERR_ALREADY_EXISTS;
3265 vsi_handle_arr[0] = cur_handle;
3266 vsi_handle_arr[1] = vsi_handle;
3267 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3268 &vsi_list_id, lkup_type);
3272 tmp_fltr = v_list_itr->fltr_info;
3273 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3274 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3275 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3276 /* Update the previous switch rule to a new VSI list which
3277 * includes current VSI that is requested
3279 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3283 /* before overriding VSI list map info. decrement ref_cnt of
/* (Comment continuation not visible — presumably "...VSI list".) */
3286 v_list_itr->vsi_list_info->ref_cnt--;
3288 /* now update to newly created list */
3289 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3290 v_list_itr->vsi_list_info =
3291 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3293 v_list_itr->vsi_count++;
3297 ice_release_lock(rule_lock);
3302 * ice_add_vlan - Add VLAN based filter rule
3303 * @hw: pointer to the hardware structure
3304 * @v_list: list of VLAN entries and forwarding information
3307 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3309 struct ice_fltr_list_entry *v_list_itr;
/* Guard not visible in listing — presumably NULL-checks v_list/hw. */
3312 return ICE_ERR_PARAM;
/* Per-entry status is recorded in each list element; the first failure
 * aborts the walk and is also returned to the caller.
 */
3314 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3316 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3317 return ICE_ERR_PARAM;
3318 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3319 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3320 if (v_list_itr->status)
3321 return v_list_itr->status;
3327 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3328 * @hw: pointer to the hardware structure
3329 * @mv_list: list of MAC and VLAN filters
3331 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3332 * pruning bits enabled, then it is the responsibility of the caller to make
3333 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3334 * VLAN won't be received on that VSI otherwise.
3337 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3339 struct ice_fltr_list_entry *mv_list_itr;
3341 if (!mv_list || !hw)
3342 return ICE_ERR_PARAM;
/* Each entry is added via the generic MAC_VLAN recipe path; first failure
 * stops processing and is returned.
 */
3344 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3346 enum ice_sw_lkup_type l_type =
3347 mv_list_itr->fltr_info.lkup_type;
3349 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3350 return ICE_ERR_PARAM;
3351 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3352 mv_list_itr->status =
3353 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3355 if (mv_list_itr->status)
3356 return mv_list_itr->status;
3362 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3363 * @hw: pointer to the hardware structure
3364 * @em_list: list of ether type MAC filter, MAC is optional
3366 * This function requires the caller to populate the entries in
3367 * the filter list with the necessary fields (including flags to
3368 * indicate Tx or Rx rules).
3371 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3373 struct ice_fltr_list_entry *em_list_itr;
3375 if (!em_list || !hw)
3376 return ICE_ERR_PARAM;
/* Unlike ice_add_mac/ice_add_vlan, the flag (Tx/Rx) is taken from the
 * caller-populated entry rather than being forced here.
 */
3378 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3380 enum ice_sw_lkup_type l_type =
3381 em_list_itr->fltr_info.lkup_type;
3383 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3384 l_type != ICE_SW_LKUP_ETHERTYPE)
3385 return ICE_ERR_PARAM;
3387 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3389 if (em_list_itr->status)
3390 return em_list_itr->status;
3396 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3397 * @hw: pointer to the hardware structure
3398 * @em_list: list of ethertype or ethertype MAC entries
3401 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3403 struct ice_fltr_list_entry *em_list_itr, *tmp;
3405 if (!em_list || !hw)
3406 return ICE_ERR_PARAM;
3408 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3410 enum ice_sw_lkup_type l_type =
3411 em_list_itr->fltr_info.lkup_type;
3413 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3414 l_type != ICE_SW_LKUP_ETHERTYPE)
3415 return ICE_ERR_PARAM;
3417 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3419 if (em_list_itr->status)
3420 return em_list_itr->status;
3426 * ice_rem_sw_rule_info
3427 * @hw: pointer to the hardware structure
3428 * @rule_head: pointer to the switch list structure that we want to delete
3431 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3433 if (!LIST_EMPTY(rule_head)) {
3434 struct ice_fltr_mgmt_list_entry *entry;
3435 struct ice_fltr_mgmt_list_entry *tmp;
3437 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3438 ice_fltr_mgmt_list_entry, list_entry) {
3439 LIST_DEL(&entry->list_entry);
3440 ice_free(hw, entry);
3446 * ice_rem_adv_rule_info
3447 * @hw: pointer to the hardware structure
3448 * @rule_head: pointer to the switch list structure that we want to delete
3451 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3453 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3454 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3456 if (LIST_EMPTY(rule_head))
3459 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3460 ice_adv_fltr_mgmt_list_entry, list_entry) {
3461 LIST_DEL(&lst_itr->list_entry);
3462 ice_free(hw, lst_itr->lkups);
3463 ice_free(hw, lst_itr);
3468 * ice_rem_all_sw_rules_info
3469 * @hw: pointer to the hardware structure
3471 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3473 struct ice_switch_info *sw = hw->switch_info;
3476 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3477 struct LIST_HEAD_TYPE *rule_head;
3479 rule_head = &sw->recp_list[i].filt_rules;
3480 if (!sw->recp_list[i].adv_rule)
3481 ice_rem_sw_rule_info(hw, rule_head);
3483 ice_rem_adv_rule_info(hw, rule_head);
3488 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3489 * @pi: pointer to the port_info structure
3490 * @vsi_handle: VSI handle to set as default
3491 * @set: true to add the above mentioned switch rule, false to remove it
3492 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3494 * add filter rule to set/unset given VSI as default VSI for the switch
3495 * (represented by swid)
3498 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3501 struct ice_aqc_sw_rules_elem *s_rule;
3502 struct ice_fltr_info f_info;
3503 struct ice_hw *hw = pi->hw;
3504 enum ice_adminq_opc opcode;
3505 enum ice_status status;
3509 if (!ice_is_vsi_valid(hw, vsi_handle))
3510 return ICE_ERR_PARAM;
3511 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3513 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3514 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3515 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3517 return ICE_ERR_NO_MEMORY;
3519 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3521 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3522 f_info.flag = direction;
3523 f_info.fltr_act = ICE_FWD_TO_VSI;
3524 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3526 if (f_info.flag & ICE_FLTR_RX) {
3527 f_info.src = pi->lport;
3528 f_info.src_id = ICE_SRC_ID_LPORT;
3530 f_info.fltr_rule_id =
3531 pi->dflt_rx_vsi_rule_id;
3532 } else if (f_info.flag & ICE_FLTR_TX) {
3533 f_info.src_id = ICE_SRC_ID_VSI;
3534 f_info.src = hw_vsi_id;
3536 f_info.fltr_rule_id =
3537 pi->dflt_tx_vsi_rule_id;
3541 opcode = ice_aqc_opc_add_sw_rules;
3543 opcode = ice_aqc_opc_remove_sw_rules;
3545 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3547 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3548 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3551 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3553 if (f_info.flag & ICE_FLTR_TX) {
3554 pi->dflt_tx_vsi_num = hw_vsi_id;
3555 pi->dflt_tx_vsi_rule_id = index;
3556 } else if (f_info.flag & ICE_FLTR_RX) {
3557 pi->dflt_rx_vsi_num = hw_vsi_id;
3558 pi->dflt_rx_vsi_rule_id = index;
3561 if (f_info.flag & ICE_FLTR_TX) {
3562 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3563 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3564 } else if (f_info.flag & ICE_FLTR_RX) {
3565 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3566 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3571 ice_free(hw, s_rule);
3576 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3577 * @hw: pointer to the hardware structure
3578 * @recp_id: lookup type for which the specified rule needs to be searched
3579 * @f_info: rule information
3581 * Helper function to search for a unicast rule entry - this is to be used
3582 * to remove unicast MAC filter that is not shared with other VSIs on the
3585 * Returns pointer to entry storing the rule if found
3587 static struct ice_fltr_mgmt_list_entry *
3588 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3589 struct ice_fltr_info *f_info)
3591 struct ice_switch_info *sw = hw->switch_info;
3592 struct ice_fltr_mgmt_list_entry *list_itr;
3593 struct LIST_HEAD_TYPE *list_head;
3595 list_head = &sw->recp_list[recp_id].filt_rules;
3596 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3598 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3599 sizeof(f_info->l_data)) &&
3600 f_info->fwd_id.hw_vsi_id ==
3601 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3602 f_info->flag == list_itr->fltr_info.flag)
3609 * ice_remove_mac - remove a MAC address based filter rule
3610 * @hw: pointer to the hardware structure
3611 * @m_list: list of MAC addresses and forwarding information
3613 * This function removes either a MAC filter rule or a specific VSI from a
3614 * VSI list for a multicast MAC address.
3616 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3617 * ice_add_mac. Caller should be aware that this call will only work if all
3618 * the entries passed into m_list were added previously. It will not attempt to
3619 * do a partial remove of entries that were found.
3622 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3624 struct ice_fltr_list_entry *list_itr, *tmp;
3625 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3628 return ICE_ERR_PARAM;
3630 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3631 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3633 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3634 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3637 if (l_type != ICE_SW_LKUP_MAC)
3638 return ICE_ERR_PARAM;
3640 vsi_handle = list_itr->fltr_info.vsi_handle;
3641 if (!ice_is_vsi_valid(hw, vsi_handle))
3642 return ICE_ERR_PARAM;
3644 list_itr->fltr_info.fwd_id.hw_vsi_id =
3645 ice_get_hw_vsi_num(hw, vsi_handle);
3646 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3647 /* Don't remove the unicast address that belongs to
3648 * another VSI on the switch, since it is not being
3651 ice_acquire_lock(rule_lock);
3652 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3653 &list_itr->fltr_info)) {
3654 ice_release_lock(rule_lock);
3655 return ICE_ERR_DOES_NOT_EXIST;
3657 ice_release_lock(rule_lock);
3659 list_itr->status = ice_remove_rule_internal(hw,
3662 if (list_itr->status)
3663 return list_itr->status;
3669 * ice_remove_vlan - Remove VLAN based filter rule
3670 * @hw: pointer to the hardware structure
3671 * @v_list: list of VLAN entries and forwarding information
3674 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3676 struct ice_fltr_list_entry *v_list_itr, *tmp;
3679 return ICE_ERR_PARAM;
3681 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3683 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3685 if (l_type != ICE_SW_LKUP_VLAN)
3686 return ICE_ERR_PARAM;
3687 v_list_itr->status = ice_remove_rule_internal(hw,
3690 if (v_list_itr->status)
3691 return v_list_itr->status;
3697 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3698 * @hw: pointer to the hardware structure
3699 * @v_list: list of MAC VLAN entries and forwarding information
3702 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3704 struct ice_fltr_list_entry *v_list_itr, *tmp;
3707 return ICE_ERR_PARAM;
3709 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3711 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3713 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3714 return ICE_ERR_PARAM;
3715 v_list_itr->status =
3716 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3718 if (v_list_itr->status)
3719 return v_list_itr->status;
3725 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3726 * @fm_entry: filter entry to inspect
3727 * @vsi_handle: VSI handle to compare with filter info
3730 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3732 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3733 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3734 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3735 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3740 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3741 * @hw: pointer to the hardware structure
3742 * @vsi_handle: VSI handle to remove filters from
3743 * @vsi_list_head: pointer to the list to add entry to
3744 * @fi: pointer to fltr_info of filter entry to copy & add
3746 * Helper function, used when creating a list of filters to remove from
3747 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3748 * original filter entry, with the exception of fltr_info.fltr_act and
3749 * fltr_info.fwd_id fields. These are set such that later logic can
3750 * extract which VSI to remove the fltr from, and pass on that information.
3752 static enum ice_status
3753 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3754 struct LIST_HEAD_TYPE *vsi_list_head,
3755 struct ice_fltr_info *fi)
3757 struct ice_fltr_list_entry *tmp;
3759 /* this memory is freed up in the caller function
3760 * once filters for this VSI are removed
3762 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3764 return ICE_ERR_NO_MEMORY;
3766 tmp->fltr_info = *fi;
3768 /* Overwrite these fields to indicate which VSI to remove filter from,
3769 * so find and remove logic can extract the information from the
3770 * list entries. Note that original entries will still have proper
3773 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3774 tmp->fltr_info.vsi_handle = vsi_handle;
3775 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3777 LIST_ADD(&tmp->list_entry, vsi_list_head);
3783 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3784 * @hw: pointer to the hardware structure
3785 * @vsi_handle: VSI handle to remove filters from
3786 * @lkup_list_head: pointer to the list that has certain lookup type filters
3787 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3789 * Locates all filters in lkup_list_head that are used by the given VSI,
3790 * and adds COPIES of those entries to vsi_list_head (intended to be used
3791 * to remove the listed filters).
3792 * Note that this means all entries in vsi_list_head must be explicitly
3793 * deallocated by the caller when done with list.
3795 static enum ice_status
3796 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3797 struct LIST_HEAD_TYPE *lkup_list_head,
3798 struct LIST_HEAD_TYPE *vsi_list_head)
3800 struct ice_fltr_mgmt_list_entry *fm_entry;
3801 enum ice_status status = ICE_SUCCESS;
3803 /* check to make sure VSI ID is valid and within boundary */
3804 if (!ice_is_vsi_valid(hw, vsi_handle))
3805 return ICE_ERR_PARAM;
3807 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3808 ice_fltr_mgmt_list_entry, list_entry) {
3809 struct ice_fltr_info *fi;
3811 fi = &fm_entry->fltr_info;
3812 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3815 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3824 * ice_determine_promisc_mask
3825 * @fi: filter info to parse
3827 * Helper function to determine which ICE_PROMISC_ mask corresponds
3828 * to given filter into.
3830 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3832 u16 vid = fi->l_data.mac_vlan.vlan_id;
3833 u8 *macaddr = fi->l_data.mac.mac_addr;
3834 bool is_tx_fltr = false;
3835 u8 promisc_mask = 0;
3837 if (fi->flag == ICE_FLTR_TX)
3840 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3841 promisc_mask |= is_tx_fltr ?
3842 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3843 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3844 promisc_mask |= is_tx_fltr ?
3845 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3846 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3847 promisc_mask |= is_tx_fltr ?
3848 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3850 promisc_mask |= is_tx_fltr ?
3851 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3853 return promisc_mask;
3857 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3858 * @hw: pointer to the hardware structure
3859 * @vsi_handle: VSI handle to retrieve info from
3860 * @promisc_mask: pointer to mask to be filled in
3861 * @vid: VLAN ID of promisc VLAN VSI
3864 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3867 struct ice_switch_info *sw = hw->switch_info;
3868 struct ice_fltr_mgmt_list_entry *itr;
3869 struct LIST_HEAD_TYPE *rule_head;
3870 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3872 if (!ice_is_vsi_valid(hw, vsi_handle))
3873 return ICE_ERR_PARAM;
3877 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3878 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3880 ice_acquire_lock(rule_lock);
3881 LIST_FOR_EACH_ENTRY(itr, rule_head,
3882 ice_fltr_mgmt_list_entry, list_entry) {
3883 /* Continue if this filter doesn't apply to this VSI or the
3884 * VSI ID is not in the VSI map for this filter
3886 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3889 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3891 ice_release_lock(rule_lock);
3897 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3898 * @hw: pointer to the hardware structure
3899 * @vsi_handle: VSI handle to retrieve info from
3900 * @promisc_mask: pointer to mask to be filled in
3901 * @vid: VLAN ID of promisc VLAN VSI
3904 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3907 struct ice_switch_info *sw = hw->switch_info;
3908 struct ice_fltr_mgmt_list_entry *itr;
3909 struct LIST_HEAD_TYPE *rule_head;
3910 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3912 if (!ice_is_vsi_valid(hw, vsi_handle))
3913 return ICE_ERR_PARAM;
3917 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3918 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3920 ice_acquire_lock(rule_lock);
3921 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3923 /* Continue if this filter doesn't apply to this VSI or the
3924 * VSI ID is not in the VSI map for this filter
3926 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3929 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3931 ice_release_lock(rule_lock);
3937 * ice_remove_promisc - Remove promisc based filter rules
3938 * @hw: pointer to the hardware structure
3939 * @recp_id: recipe ID for which the rule needs to removed
3940 * @v_list: list of promisc entries
3942 static enum ice_status
3943 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3944 struct LIST_HEAD_TYPE *v_list)
3946 struct ice_fltr_list_entry *v_list_itr, *tmp;
3948 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3950 v_list_itr->status =
3951 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3952 if (v_list_itr->status)
3953 return v_list_itr->status;
3959 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3960 * @hw: pointer to the hardware structure
3961 * @vsi_handle: VSI handle to clear mode
3962 * @promisc_mask: mask of promiscuous config bits to clear
3963 * @vid: VLAN ID to clear VLAN promiscuous
3966 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3969 struct ice_switch_info *sw = hw->switch_info;
3970 struct ice_fltr_list_entry *fm_entry, *tmp;
3971 struct LIST_HEAD_TYPE remove_list_head;
3972 struct ice_fltr_mgmt_list_entry *itr;
3973 struct LIST_HEAD_TYPE *rule_head;
3974 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3975 enum ice_status status = ICE_SUCCESS;
3978 if (!ice_is_vsi_valid(hw, vsi_handle))
3979 return ICE_ERR_PARAM;
3981 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3982 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3984 recipe_id = ICE_SW_LKUP_PROMISC;
3986 rule_head = &sw->recp_list[recipe_id].filt_rules;
3987 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3989 INIT_LIST_HEAD(&remove_list_head);
3991 ice_acquire_lock(rule_lock);
3992 LIST_FOR_EACH_ENTRY(itr, rule_head,
3993 ice_fltr_mgmt_list_entry, list_entry) {
3994 struct ice_fltr_info *fltr_info;
3995 u8 fltr_promisc_mask = 0;
3997 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3999 fltr_info = &itr->fltr_info;
4001 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4002 vid != fltr_info->l_data.mac_vlan.vlan_id)
4005 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4007 /* Skip if filter is not completely specified by given mask */
4008 if (fltr_promisc_mask & ~promisc_mask)
4011 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4015 ice_release_lock(rule_lock);
4016 goto free_fltr_list;
4019 ice_release_lock(rule_lock);
4021 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4024 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4025 ice_fltr_list_entry, list_entry) {
4026 LIST_DEL(&fm_entry->list_entry);
4027 ice_free(hw, fm_entry);
4034 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4035 * @hw: pointer to the hardware structure
4036 * @vsi_handle: VSI handle to configure
4037 * @promisc_mask: mask of promiscuous config bits
4038 * @vid: VLAN ID to set VLAN promiscuous
4041 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4043 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4044 struct ice_fltr_list_entry f_list_entry;
4045 struct ice_fltr_info new_fltr;
4046 enum ice_status status = ICE_SUCCESS;
4052 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4054 if (!ice_is_vsi_valid(hw, vsi_handle))
4055 return ICE_ERR_PARAM;
4056 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4058 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4060 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4061 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4062 new_fltr.l_data.mac_vlan.vlan_id = vid;
4063 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4065 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4066 recipe_id = ICE_SW_LKUP_PROMISC;
4069 /* Separate filters must be set for each direction/packet type
4070 * combination, so we will loop over the mask value, store the
4071 * individual type, and clear it out in the input mask as it
4074 while (promisc_mask) {
4080 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4081 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4082 pkt_type = UCAST_FLTR;
4083 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4084 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4085 pkt_type = UCAST_FLTR;
4087 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4088 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4089 pkt_type = MCAST_FLTR;
4090 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4091 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4092 pkt_type = MCAST_FLTR;
4094 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4095 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4096 pkt_type = BCAST_FLTR;
4097 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4098 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4099 pkt_type = BCAST_FLTR;
4103 /* Check for VLAN promiscuous flag */
4104 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4105 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4106 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4107 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4111 /* Set filter DA based on packet type */
4112 mac_addr = new_fltr.l_data.mac.mac_addr;
4113 if (pkt_type == BCAST_FLTR) {
4114 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4115 } else if (pkt_type == MCAST_FLTR ||
4116 pkt_type == UCAST_FLTR) {
4117 /* Use the dummy ether header DA */
4118 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4119 ICE_NONDMA_TO_NONDMA);
4120 if (pkt_type == MCAST_FLTR)
4121 mac_addr[0] |= 0x1; /* Set multicast bit */
4124 /* Need to reset this to zero for all iterations */
4127 new_fltr.flag |= ICE_FLTR_TX;
4128 new_fltr.src = hw_vsi_id;
4130 new_fltr.flag |= ICE_FLTR_RX;
4131 new_fltr.src = hw->port_info->lport;
4134 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4135 new_fltr.vsi_handle = vsi_handle;
4136 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4137 f_list_entry.fltr_info = new_fltr;
4139 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4140 if (status != ICE_SUCCESS)
4141 goto set_promisc_exit;
4149 * ice_set_vlan_vsi_promisc
4150 * @hw: pointer to the hardware structure
4151 * @vsi_handle: VSI handle to configure
4152 * @promisc_mask: mask of promiscuous config bits
4153 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4155 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4158 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4159 bool rm_vlan_promisc)
4161 struct ice_switch_info *sw = hw->switch_info;
4162 struct ice_fltr_list_entry *list_itr, *tmp;
4163 struct LIST_HEAD_TYPE vsi_list_head;
4164 struct LIST_HEAD_TYPE *vlan_head;
4165 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4166 enum ice_status status;
4169 INIT_LIST_HEAD(&vsi_list_head);
4170 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4171 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4172 ice_acquire_lock(vlan_lock);
4173 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4175 ice_release_lock(vlan_lock);
4177 goto free_fltr_list;
4179 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4181 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4182 if (rm_vlan_promisc)
4183 status = ice_clear_vsi_promisc(hw, vsi_handle,
4184 promisc_mask, vlan_id);
4186 status = ice_set_vsi_promisc(hw, vsi_handle,
4187 promisc_mask, vlan_id);
4193 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4194 ice_fltr_list_entry, list_entry) {
4195 LIST_DEL(&list_itr->list_entry);
4196 ice_free(hw, list_itr);
4202 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4203 * @hw: pointer to the hardware structure
4204 * @vsi_handle: VSI handle to remove filters from
4205 * @lkup: switch rule filter lookup type
4208 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4209 enum ice_sw_lkup_type lkup)
4211 struct ice_switch_info *sw = hw->switch_info;
4212 struct ice_fltr_list_entry *fm_entry;
4213 struct LIST_HEAD_TYPE remove_list_head;
4214 struct LIST_HEAD_TYPE *rule_head;
4215 struct ice_fltr_list_entry *tmp;
4216 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4217 enum ice_status status;
4219 INIT_LIST_HEAD(&remove_list_head);
4220 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4221 rule_head = &sw->recp_list[lkup].filt_rules;
4222 ice_acquire_lock(rule_lock);
4223 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4225 ice_release_lock(rule_lock);
4230 case ICE_SW_LKUP_MAC:
4231 ice_remove_mac(hw, &remove_list_head);
4233 case ICE_SW_LKUP_VLAN:
4234 ice_remove_vlan(hw, &remove_list_head);
4236 case ICE_SW_LKUP_PROMISC:
4237 case ICE_SW_LKUP_PROMISC_VLAN:
4238 ice_remove_promisc(hw, lkup, &remove_list_head);
4240 case ICE_SW_LKUP_MAC_VLAN:
4241 ice_remove_mac_vlan(hw, &remove_list_head);
4243 case ICE_SW_LKUP_ETHERTYPE:
4244 case ICE_SW_LKUP_ETHERTYPE_MAC:
4245 ice_remove_eth_mac(hw, &remove_list_head);
4247 case ICE_SW_LKUP_DFLT:
4248 ice_debug(hw, ICE_DBG_SW,
4249 "Remove filters for this lookup type hasn't been implemented yet\n");
4251 case ICE_SW_LKUP_LAST:
4252 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4256 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4257 ice_fltr_list_entry, list_entry) {
4258 LIST_DEL(&fm_entry->list_entry);
4259 ice_free(hw, fm_entry);
4264 * ice_remove_vsi_fltr - Remove all filters for a VSI
4265 * @hw: pointer to the hardware structure
4266 * @vsi_handle: VSI handle to remove filters from
4268 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4270 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4272 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4273 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4274 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4275 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4276 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4277 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4278 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4279 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4283 * ice_alloc_res_cntr - allocating resource counter
4284 * @hw: pointer to the hardware structure
4285 * @type: type of resource
4286 * @alloc_shared: if set it is shared else dedicated
4287 * @num_items: number of entries requested for FD resource type
4288 * @counter_id: counter index returned by AQ call
4291 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4294 struct ice_aqc_alloc_free_res_elem *buf;
4295 enum ice_status status;
4298 /* Allocate resource */
4299 buf_len = sizeof(*buf);
4300 buf = (struct ice_aqc_alloc_free_res_elem *)
4301 ice_malloc(hw, buf_len);
4303 return ICE_ERR_NO_MEMORY;
4305 buf->num_elems = CPU_TO_LE16(num_items);
4306 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4307 ICE_AQC_RES_TYPE_M) | alloc_shared);
4309 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4310 ice_aqc_opc_alloc_res, NULL);
4314 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4322 * ice_free_res_cntr - free resource counter
4323 * @hw: pointer to the hardware structure
4324 * @type: type of resource
4325 * @alloc_shared: if set it is shared else dedicated
4326 * @num_items: number of entries to be freed for FD resource type
4327 * @counter_id: counter ID resource which needs to be freed
4330 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4333 struct ice_aqc_alloc_free_res_elem *buf;
4334 enum ice_status status;
4338 buf_len = sizeof(*buf);
4339 buf = (struct ice_aqc_alloc_free_res_elem *)
4340 ice_malloc(hw, buf_len);
4342 return ICE_ERR_NO_MEMORY;
4344 buf->num_elems = CPU_TO_LE16(num_items);
4345 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4346 ICE_AQC_RES_TYPE_M) | alloc_shared);
4347 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4349 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4350 ice_aqc_opc_free_res, NULL);
4352 ice_debug(hw, ICE_DBG_SW,
4353 "counter resource could not be freed\n");
4360 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4361 * @hw: pointer to the hardware structure
4362 * @counter_id: returns counter index
4364 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4366 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4367 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4372 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4373 * @hw: pointer to the hardware structure
4374 * @counter_id: counter index to be freed
4376 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4378 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4379 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4384 * ice_alloc_res_lg_act - add large action resource
4385 * @hw: pointer to the hardware structure
4386 * @l_id: large action ID to fill it in
4387 * @num_acts: number of actions to hold with a large action entry
4389 static enum ice_status
4390 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4392 struct ice_aqc_alloc_free_res_elem *sw_buf;
4393 enum ice_status status;
4396 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4397 return ICE_ERR_PARAM;
4399 /* Allocate resource for large action */
4400 buf_len = sizeof(*sw_buf);
4401 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4402 ice_malloc(hw, buf_len);
4404 return ICE_ERR_NO_MEMORY;
4406 sw_buf->num_elems = CPU_TO_LE16(1);
4408 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4409 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4410 * If num_acts is greater than 2, then use
4411 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4412 * The num_acts cannot exceed 4. This was ensured at the
4413 * beginning of the function.
4416 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4417 else if (num_acts == 2)
4418 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4420 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4422 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4423 ice_aqc_opc_alloc_res, NULL);
4425 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4427 ice_free(hw, sw_buf);
4432 * ice_add_mac_with_sw_marker - add filter with sw marker
4433 * @hw: pointer to the hardware structure
4434 * @f_info: filter info structure containing the MAC filter information
4435 * @sw_marker: sw marker to tag the Rx descriptor with
/* NOTE(review): this listing appears truncated — local declarations (e.g.
 * entry_exists, lg_act_id), closing braces and goto/exit labels are missing,
 * so the code below is left byte-identical and only comments are added.
 *
 * Visible behavior: validate that the filter forwards to a valid VSI and is a
 * MAC lookup with a valid sw_marker; add the MAC filter (tolerating
 * ICE_ERR_ALREADY_EXISTS); then, under the MAC recipe rule lock, attach a
 * 3-action "marker" large action to the rule via ice_add_marker_act().
 */
4438 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4441 struct ice_switch_info *sw = hw->switch_info;
4442 struct ice_fltr_mgmt_list_entry *m_entry;
4443 struct ice_fltr_list_entry fl_info;
4444 struct LIST_HEAD_TYPE l_head;
4445 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4446 enum ice_status ret;
/* Parameter validation: only ICE_FWD_TO_VSI MAC filters with a real marker
 * ID and a valid VSI handle are accepted.
 */
4450 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4451 return ICE_ERR_PARAM;
4453 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4454 return ICE_ERR_PARAM;
4456 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4457 return ICE_ERR_PARAM;
4459 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4460 return ICE_ERR_PARAM;
4461 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4463 /* Add filter if it doesn't exist so then the adding of large
4464 * action always results in update
4467 INIT_LIST_HEAD(&l_head);
4468 fl_info.fltr_info = *f_info;
4469 LIST_ADD(&fl_info.list_entry, &l_head);
/* Remember whether the filter pre-existed so cleanup (visible near the end)
 * only removes filters this call created.
 */
4471 entry_exists = false;
4472 ret = ice_add_mac(hw, &l_head);
4473 if (ret == ICE_ERR_ALREADY_EXISTS)
4474 entry_exists = true;
4478 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4479 ice_acquire_lock(rule_lock);
4480 /* Get the book keeping entry for the filter */
4481 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4485 /* If counter action was enabled for this rule then don't enable
4486 * sw marker large action
4488 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4489 ret = ICE_ERR_PARAM;
4493 /* if same marker was added before */
4494 if (m_entry->sw_marker_id == sw_marker) {
4495 ret = ICE_ERR_ALREADY_EXISTS;
4499 /* Allocate a hardware table entry to hold large act. Three actions
4500 * for marker based large action
4502 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4506 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4509 /* Update the switch rule to add the marker action */
4510 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4512 ice_release_lock(rule_lock);
/* Error path (label not visible in this listing): drop the lock and undo the
 * filter add if this call created it.
 */
4517 ice_release_lock(rule_lock);
4518 /* only remove entry if it did not exist previously */
4520 ret = ice_remove_mac(hw, &l_head);
4526 * ice_add_mac_with_counter - add filter with counter enabled
4527 * @hw: pointer to the hardware structure
4528 * @f_info: pointer to filter info structure containing the MAC filter
/* NOTE(review): truncated listing — local declarations (entry_exist,
 * counter_id, lg_act_id), exit labels and some braces are missing; the code
 * is left byte-identical, only comments are added.
 *
 * Visible behavior: mirror of ice_add_mac_with_sw_marker() but attaches a
 * 2-action counter large action instead of a marker. Rejects filters that
 * already carry a sw marker, and treats an already-enabled counter as
 * ICE_ERR_ALREADY_EXISTS.
 */
4532 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4534 struct ice_switch_info *sw = hw->switch_info;
4535 struct ice_fltr_mgmt_list_entry *m_entry;
4536 struct ice_fltr_list_entry fl_info;
4537 struct LIST_HEAD_TYPE l_head;
4538 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4539 enum ice_status ret;
4544 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4545 return ICE_ERR_PARAM;
4547 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4548 return ICE_ERR_PARAM;
4550 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4551 return ICE_ERR_PARAM;
4552 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4554 entry_exist = false;
4556 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4558 /* Add filter if it doesn't exist so then the adding of large
4559 * action always results in update
4561 INIT_LIST_HEAD(&l_head);
4563 fl_info.fltr_info = *f_info;
4564 LIST_ADD(&fl_info.list_entry, &l_head);
4566 ret = ice_add_mac(hw, &l_head);
4567 if (ret == ICE_ERR_ALREADY_EXISTS)
4572 ice_acquire_lock(rule_lock);
4573 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
/* Bookkeeping entry lookup failed (condition line missing in listing). */
4575 ret = ICE_ERR_BAD_PTR;
4579 /* Don't enable counter for a filter for which sw marker was enabled */
4580 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4581 ret = ICE_ERR_PARAM;
4585 /* If a counter was already enabled then don't need to add again */
4586 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4587 ret = ICE_ERR_ALREADY_EXISTS;
4591 /* Allocate a hardware table entry to VLAN counter */
4592 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4596 /* Allocate a hardware table entry to hold large act. Two actions for
4597 * counter based large action
4599 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4603 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4606 /* Update the switch rule to add the counter action */
4607 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4609 ice_release_lock(rule_lock);
/* Error path (label not visible): release lock, undo filter add if it was
 * created by this call.
 */
4614 ice_release_lock(rule_lock);
4615 /* only remove entry if it did not exist previously */
4617 ret = ice_remove_mac(hw, &l_head);
4622 /* This is mapping table entry that maps every word within a given protocol
4623 * structure to the real byte offset as per the specification of that
4625 * for example dst address is 3 words in ethertype header and corresponding
4626 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4627 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4628 * matching entry describing its field. This needs to be updated if new
4629 * structure is added to that union.
/* Per-protocol word-to-byte-offset extraction table: for each protocol type,
 * entry j gives the byte offset (within that protocol's header) of the j-th
 * 16-bit match word. Tunnel headers (VXLAN/GENEVE/VXLAN-GPE/GTP) start at
 * offset 8 — presumably skipping the outer UDP header bytes; confirm against
 * the header layouts. NOTE(review): the closing brace of this initializer is
 * not visible in this truncated listing.
 */
4631 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4632 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4633 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4634 { ICE_ETYPE_OL, { 0 } },
4635 { ICE_VLAN_OFOS, { 0, 2 } },
4636 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4637 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4638 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4639 26, 28, 30, 32, 34, 36, 38 } },
4640 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4641 26, 28, 30, 32, 34, 36, 38 } },
4642 { ICE_TCP_IL, { 0, 2 } },
4643 { ICE_UDP_OF, { 0, 2 } },
4644 { ICE_UDP_ILOS, { 0, 2 } },
4645 { ICE_SCTP_IL, { 0, 2 } },
4646 { ICE_VXLAN, { 8, 10, 12, 14 } },
4647 { ICE_GENEVE, { 8, 10, 12, 14 } },
4648 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4649 { ICE_NVGRE, { 0, 2, 4, 6 } },
4650 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4651 { ICE_PPPOE, { 0, 2, 4, 6 } },
4654 /* The following table describes preferred grouping of recipes.
4655 * If a recipe that needs to be programmed is a superset or matches one of the
4656 * following combinations, then the recipe needs to be chained as per the
/* Software protocol type -> hardware protocol ID mapping. Note the UDP-based
 * tunnels (VXLAN, GENEVE, VXLAN-GPE, GTP) all map onto ICE_UDP_OF_HW — they
 * are distinguished elsewhere (see the tunnel metadata handling in
 * ice_add_special_words()/ice_add_sw_recipe()). NOTE(review): closing brace
 * of the initializer is not visible in this truncated listing.
 */
4660 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4661 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4662 { ICE_MAC_IL, ICE_MAC_IL_HW },
4663 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4664 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4665 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4666 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4667 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4668 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4669 { ICE_TCP_IL, ICE_TCP_IL_HW },
4670 { ICE_UDP_OF, ICE_UDP_OF_HW },
4671 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4672 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4673 { ICE_VXLAN, ICE_UDP_OF_HW },
4674 { ICE_GENEVE, ICE_UDP_OF_HW },
4675 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4676 { ICE_NVGRE, ICE_GRE_OF_HW },
4677 { ICE_GTP, ICE_UDP_OF_HW },
4678 { ICE_PPPOE, ICE_PPPOE_HW },
4682 * ice_find_recp - find a recipe
4683 * @hw: pointer to the hardware structure
4684 * @lkup_exts: extension sequence to match
4686 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* NOTE(review): truncated listing — loop-variable declarations, `continue`/
 * `break` statements and several braces are missing; code left byte-identical.
 *
 * Visible behavior: linear scan of all recipes; lazily refreshes SW
 * bookkeeping from FW for recipes not yet created locally; skips inverse-
 * action recipes; matches when both recipes have the same number of valid
 * words and every (prot_id, off) pair in lkup_exts appears in the candidate
 * recipe (order-independent comparison).
 */
4688 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4690 bool refresh_required = true;
4691 struct ice_sw_recipe *recp;
4694 /* Walk through existing recipes to find a match */
4695 recp = hw->switch_info->recp_list;
4696 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4697 /* If recipe was not created for this ID, in SW bookkeeping,
4698 * check if FW has an entry for this recipe. If the FW has an
4699 * entry update it in our SW bookkeeping and continue with the
4702 if (!recp[i].recp_created)
4703 if (ice_get_recp_frm_fw(hw,
4704 hw->switch_info->recp_list, i,
4708 /* Skip inverse action recipes */
4709 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4710 ICE_AQ_RECIPE_ACT_INV_ACT)
4713 /* if number of words we are looking for match */
4714 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4715 struct ice_fv_word *a = lkup_exts->fv_words;
4716 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
4720 for (p = 0; p < lkup_exts->n_val_words; p++) {
4721 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4723 if (a[p].off == b[q].off &&
4724 a[p].prot_id == b[q].prot_id)
4725 /* Found the "p"th word in the
4730 /* After walking through all the words in the
4731 * "i"th recipe if "p"th word was not found then
4732 * this recipe is not what we are looking for.
4733 * So break out from this loop and try the next
4736 if (q >= recp[i].lkup_exts.n_val_words) {
4741 /* If for "i"th recipe the found was never set to false
4742 * then it means we found our match
4745 return i; /* Return the recipe ID */
/* No recipe matched: sentinel value signals "not found" to the caller. */
4748 return ICE_MAX_NUM_RECIPES;
4752 * ice_prot_type_to_id - get protocol ID from protocol type
4753 * @type: protocol type
4754 * @id: pointer to variable that will receive the ID
4756 * Returns true if found, false otherwise
/* Translate a SW protocol type to its HW protocol ID via ice_prot_id_tbl.
 * NOTE(review): the `return true`/`return false` lines and closing braces are
 * missing from this truncated listing; code left byte-identical.
 */
4758 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4762 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4763 if (ice_prot_id_tbl[i].type == type) {
4764 *id = ice_prot_id_tbl[i].protocol_id;
4771 * ice_find_valid_words - count valid words
4772 * @rule: advanced rule with lookup information
4773 * @lkup_exts: byte offset extractions of the words that are valid
4775 * calculate valid words in a lookup rule using mask value
/* NOTE(review): truncated listing — return type line, local declarations
 * (word, prot_id, ret_val, j) and some statements (e.g. word++/break) are
 * missing; code left byte-identical.
 *
 * Visible behavior: for every 16-bit word of the rule's mask union that is
 * non-zero, append a (prot_id, byte offset, mask) triple to lkup_exts,
 * bounded by ICE_MAX_CHAIN_WORDS; returns the number of words added
 * (word - previous n_val_words).
 */
4778 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4779 struct ice_prot_lkup_ext *lkup_exts)
4785 if (!ice_prot_type_to_id(rule->type, &prot_id))
4788 word = lkup_exts->n_val_words;
4790 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4791 if (((u16 *)&rule->m_u)[j] &&
4792 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4793 /* No more space to accommodate */
4794 if (word >= ICE_MAX_CHAIN_WORDS)
4796 lkup_exts->fv_words[word].off =
4797 ice_prot_ext[rule->type].offs[j];
4798 lkup_exts->fv_words[word].prot_id =
4799 ice_prot_id_tbl[rule->type].protocol_id;
4800 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4804 ret_val = word - lkup_exts->n_val_words;
4805 lkup_exts->n_val_words = word;
4811 * ice_create_first_fit_recp_def - Create a recipe grouping
4812 * @hw: pointer to the hardware structure
4813 * @lkup_exts: an array of protocol header extractions
4814 * @rg_list: pointer to a list that stores new recipe groups
4815 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4817 * Using first fit algorithm, take all the words that are still not done
4818 * and start grouping them in 4-word groups. Each group makes up one
/* NOTE(review): truncated listing — the `recp_cnt` parameter line, part of
 * the "grp == NULL ||" condition, allocation-failure check and counter
 * increments are missing; code left byte-identical.
 *
 * Visible behavior: first-fit packing of all not-yet-done lookup words into
 * groups of up to ICE_NUM_WORDS_RECIPE pairs; a new ice_recp_grp_entry is
 * allocated and appended to rg_list whenever the current group is full (or
 * absent), and each word's (prot_id, off, mask) is copied into the group.
 */
4821 static enum ice_status
4822 ice_create_first_fit_recp_def(struct ice_hw *hw,
4823 struct ice_prot_lkup_ext *lkup_exts,
4824 struct LIST_HEAD_TYPE *rg_list,
4827 struct ice_pref_recipe_group *grp = NULL;
4832 /* Walk through every word in the rule to check if it is not done. If so
4833 * then this word needs to be part of a new recipe.
4835 for (j = 0; j < lkup_exts->n_val_words; j++)
4836 if (!ice_is_bit_set(lkup_exts->done, j)) {
4838 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4839 struct ice_recp_grp_entry *entry;
4841 entry = (struct ice_recp_grp_entry *)
4842 ice_malloc(hw, sizeof(*entry));
4844 return ICE_ERR_NO_MEMORY;
4845 LIST_ADD(&entry->l_entry, rg_list);
4846 grp = &entry->r_group;
4850 grp->pairs[grp->n_val_pairs].prot_id =
4851 lkup_exts->fv_words[j].prot_id;
4852 grp->pairs[grp->n_val_pairs].off =
4853 lkup_exts->fv_words[j].off;
4854 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4862 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4863 * @hw: pointer to the hardware structure
4864 * @fv_list: field vector with the extraction sequence information
4865 * @rg_list: recipe groupings with protocol-offset pairs
4867 * Helper function to fill in the field vector indices for protocol-offset
4868 * pairs. These indexes are then ultimately programmed into a recipe.
/* NOTE(review): truncated listing — loop counters, the `found` flag, fv_idx
 * assignment and the success return are missing; code left byte-identical.
 *
 * Visible behavior: using the FIRST field vector on fv_list as the extraction
 * sequence, resolve every (prot_id, off) pair of every recipe group to its
 * index in that field vector; the caller-supplied big-endian mask is
 * byte-swapped before being stored for FW. Returns ICE_ERR_PARAM when a pair
 * cannot be located in the field vector.
 */
4870 static enum ice_status
4871 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4872 struct LIST_HEAD_TYPE *rg_list)
4874 struct ice_sw_fv_list_entry *fv;
4875 struct ice_recp_grp_entry *rg;
4876 struct ice_fv_word *fv_ext;
4878 if (LIST_EMPTY(fv_list))
4881 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4882 fv_ext = fv->fv_ptr->ew;
4884 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4887 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4888 struct ice_fv_word *pr;
4893 pr = &rg->r_group.pairs[i];
4894 mask = rg->r_group.mask[i];
4896 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4897 if (fv_ext[j].prot_id == pr->prot_id &&
4898 fv_ext[j].off == pr->off) {
4901 /* Store index of field vector */
4903 /* Mask is given by caller as big
4904 * endian, but sent to FW as little
4907 rg->fv_mask[i] = mask << 8 | mask >> 8;
4911 /* Protocol/offset could not be found, caller gave an
4915 return ICE_ERR_PARAM;
4923 * ice_find_free_recp_res_idx - find free result indexes for recipe
4924 * @hw: pointer to hardware structure
4925 * @profiles: bitmap of profiles that will be associated with the new recipe
4926 * @free_idx: pointer to variable to receive the free index bitmap
4928 * The algorithm used here is:
4929 * 1. When creating a new recipe, create a set P which contains all
4930 * Profiles that will be associated with our new recipe
4932 * 2. For each Profile p in set P:
4933 * a. Add all recipes associated with Profile p into set R
4934 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4935 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4936 * i. Or just assume they all have the same possible indexes:
4938 * i.e., PossibleIndexes = 0x0000F00000000000
4940 * 3. For each Recipe r in set R:
4941 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4942 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4944 * FreeIndexes will contain the bits indicating the indexes free for use,
4945 * then the code needs to update the recipe[r].used_result_idx_bits to
4946 * indicate which indexes were selected for use by this recipe.
/* NOTE(review): truncated listing — the return type line, `bit`/count
 * declarations, loop-increment statements and final return are missing; code
 * left byte-identical.
 *
 * Visible behavior: free = possible XOR used, where "possible" comes from
 * ice_init_possible_res_bm()/ICE_POSSIBLE_RES_IDX and "used" is the union of
 * res_idxs of every recipe already associated with any profile in `profiles`
 * (via the profile_to_recipe map). Returns the count of free indexes
 * (counting loop at the end, increment not visible).
 */
4949 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4950 ice_bitmap_t *free_idx)
4952 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4953 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4954 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4958 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
4959 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4960 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4961 ice_init_possible_res_bm(possible_idx);
4963 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
4964 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
4965 ice_set_bit(bit, possible_idx);
4967 /* For each profile we are going to associate the recipe with, add the
4968 * recipes that are associated with that profile. This will give us
4969 * the set of recipes that our recipe may collide with.
4972 while (ICE_MAX_NUM_PROFILES >
4973 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4974 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4975 ICE_MAX_NUM_RECIPES);
4980 /* For each recipe that our new recipe may collide with, determine
4981 * which indexes have been used.
4983 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4984 if (ice_is_bit_set(recipes, bit))
4985 ice_or_bitmap(used_idx, used_idx,
4986 hw->switch_info->recp_list[bit].res_idxs,
4989 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4991 /* return number of free indexes */
4993 while (ICE_MAX_FV_WORDS >
4994 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5003 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5004 * @hw: pointer to hardware structure
5005 * @rm: recipe management list entry
5006 * @match_tun: if field vector index for tunnel needs to be programmed
5007 * @profiles: bitmap of profiles that will be associated.
/* NOTE(review): truncated listing — several local declarations (recps, rid,
 * chain_idx, free_res_idx, i), error-exit labels and many closing braces are
 * missing; code left byte-identical, comments only added/fixed.
 *
 * Visible flow: (1) compute free result indexes across all affected
 * profiles; (2) fetch a template recipe via ice_aq_get_recipe(); (3) for
 * each recipe group allocate a recipe ID and program lkup_indx/mask words,
 * reserving a chain (result) index when more than one group exists; (4) for
 * multi-group recipes build an extra root "chaining" recipe whose lookup
 * words are the chain indexes of the sub-recipes, optionally adding the
 * tunnel-flag metadata word when match_tun; (5) commit via
 * ice_aq_add_recipe() under the change lock; (6) mirror everything into SW
 * bookkeeping (hw->switch_info->recp_list).
 */
5009 static enum ice_status
5010 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5011 bool match_tun, ice_bitmap_t *profiles)
5013 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5014 struct ice_aqc_recipe_data_elem *tmp;
5015 struct ice_aqc_recipe_data_elem *buf;
5016 struct ice_recp_grp_entry *entry;
5017 enum ice_status status;
5023 /* When more than one recipe are required, another recipe is needed to
5024 * chain them together. Matching a tunnel metadata ID takes up one of
5025 * the match fields in the chaining recipe reducing the number of
5026 * chained recipes by one.
5028 /* check number of free result indices */
5029 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5030 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm)
5032 if (rm->n_grp_count > 1) {
5033 if (rm->n_grp_count > free_res_idx)
5034 return ICE_ERR_MAX_LIMIT;
5039 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5040 ICE_MAX_NUM_RECIPES,
5043 return ICE_ERR_NO_MEMORY;
5045 buf = (struct ice_aqc_recipe_data_elem *)
5046 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5048 status = ICE_ERR_NO_MEMORY;
5052 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5053 recipe_count = ICE_MAX_NUM_RECIPES;
5054 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5056 if (status || recipe_count == 0)
5059 /* Allocate the recipe resources, and configure them according to the
5060 * match fields from protocol headers and extracted field vectors.
5062 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5063 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5066 status = ice_alloc_recipe(hw, &entry->rid);
5070 /* Clear the result index of the located recipe, as this will be
5071 * updated, if needed, later in the recipe creation process.
5073 tmp[0].content.result_indx = 0;
5075 buf[recps] = tmp[0];
5076 buf[recps].recipe_indx = (u8)entry->rid;
5077 /* if the recipe is a non-root recipe RID should be programmed
5078 * as 0 for the rules to be applied correctly.
5080 buf[recps].content.rid = 0;
5081 ice_memset(&buf[recps].content.lkup_indx, 0,
5082 sizeof(buf[recps].content.lkup_indx),
5085 /* All recipes use look-up index 0 to match switch ID. */
5086 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5087 buf[recps].content.mask[0] =
5088 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5089 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5092 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5093 buf[recps].content.lkup_indx[i] = 0x80;
5094 buf[recps].content.mask[i] = 0;
5097 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5098 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5099 buf[recps].content.mask[i + 1] =
5100 CPU_TO_LE16(entry->fv_mask[i]);
5103 if (rm->n_grp_count > 1) {
5104 /* Checks to see if there really is a valid result index
5107 if (chain_idx >= ICE_MAX_FV_WORDS) {
5108 ice_debug(hw, ICE_DBG_SW,
5109 "No chain index available\n");
5110 status = ICE_ERR_MAX_LIMIT;
5114 entry->chain_idx = chain_idx;
5115 buf[recps].content.result_indx =
5116 ICE_AQ_RECIPE_RESULT_EN |
5117 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5118 ICE_AQ_RECIPE_RESULT_DATA_M);
5119 ice_clear_bit(chain_idx, result_idx_bm);
5120 chain_idx = ice_find_first_bit(result_idx_bm,
5124 /* fill recipe dependencies */
5125 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5126 ICE_MAX_NUM_RECIPES);
5127 ice_set_bit(buf[recps].recipe_indx,
5128 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5129 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5133 if (rm->n_grp_count == 1) {
5134 rm->root_rid = buf[0].recipe_indx;
5135 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5136 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5137 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5138 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5139 sizeof(buf[0].recipe_bitmap),
5140 ICE_NONDMA_TO_NONDMA);
5142 status = ICE_ERR_BAD_PTR;
5145 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5146 * the recipe which is getting created if specified
5147 * by user. Usually any advanced switch filter, which results
5148 * into new extraction sequence, ended up creating a new recipe
5149 * of type ROOT and usually recipes are associated with profiles
5150 * Switch rule referring newly created recipe, needs to have
5151 * either/or 'fwd' or 'join' priority, otherwise switch rule
5152 * evaluation will not happen correctly. In other words, if
5153 * switch rule to be evaluated on priority basis, then recipe
5154 * needs to have priority, otherwise it will be evaluated last.
5156 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5158 struct ice_recp_grp_entry *last_chain_entry;
5161 /* Allocate the last recipe that will chain the outcomes of the
5162 * other recipes together
5164 status = ice_alloc_recipe(hw, &rid);
5168 buf[recps].recipe_indx = (u8)rid;
5169 buf[recps].content.rid = (u8)rid;
5170 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5171 /* the new entry created should also be part of rg_list to
5172 * make sure we have complete recipe
5174 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5175 sizeof(*last_chain_entry));
5176 if (!last_chain_entry) {
5177 status = ICE_ERR_NO_MEMORY;
5180 last_chain_entry->rid = rid;
5181 ice_memset(&buf[recps].content.lkup_indx, 0,
5182 sizeof(buf[recps].content.lkup_indx),
5184 /* All recipes use look-up index 0 to match switch ID. */
5185 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5186 buf[recps].content.mask[0] =
5187 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5188 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5189 buf[recps].content.lkup_indx[i] =
5190 ICE_AQ_RECIPE_LKUP_IGNORE;
5191 buf[recps].content.mask[i] = 0;
5195 /* update r_bitmap with the recp that is used for chaining */
5196 ice_set_bit(rid, rm->r_bitmap);
5197 /* this is the recipe that chains all the other recipes so it
5198 * should not have a chaining ID to indicate the same
5200 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5201 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5203 last_chain_entry->fv_idx[i] = entry->chain_idx;
5204 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5205 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5206 ice_set_bit(entry->rid, rm->r_bitmap);
5208 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5209 if (sizeof(buf[recps].recipe_bitmap) >=
5210 sizeof(rm->r_bitmap)) {
5211 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5212 sizeof(buf[recps].recipe_bitmap),
5213 ICE_NONDMA_TO_NONDMA);
5215 status = ICE_ERR_BAD_PTR;
5218 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5220 /* To differentiate among different UDP tunnels, a meta data ID
5224 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5225 buf[recps].content.mask[i] =
5226 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5230 rm->root_rid = (u8)rid;
5232 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5236 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5237 ice_release_change_lock(hw);
5241 /* Every recipe that just got created add it to the recipe
5244 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5245 struct ice_switch_info *sw = hw->switch_info;
5246 bool is_root, idx_found = false;
5247 struct ice_sw_recipe *recp;
5248 u16 idx, buf_idx = 0;
5250 /* find buffer index for copying some data */
5251 for (idx = 0; idx < rm->n_grp_count; idx++)
5252 if (buf[idx].recipe_indx == entry->rid) {
/* idx_found assignment/break not visible in this listing. */
5258 status = ICE_ERR_OUT_OF_RANGE;
5262 recp = &sw->recp_list[entry->rid];
5263 is_root = (rm->root_rid == entry->rid);
5264 recp->is_root = is_root;
5266 recp->root_rid = entry->rid;
5267 recp->big_recp = (is_root && rm->n_grp_count > 1);
5269 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5270 entry->r_group.n_val_pairs *
5271 sizeof(struct ice_fv_word),
5272 ICE_NONDMA_TO_NONDMA);
5274 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5275 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5277 /* Copy non-result fv index values and masks to recipe. This
5278 * call will also update the result recipe bitmask.
5280 ice_collect_result_idx(&buf[buf_idx], recp);
5282 /* for non-root recipes, also copy to the root, this allows
5283 * easier matching of a complete chained recipe
5286 ice_collect_result_idx(&buf[buf_idx],
5287 &sw->recp_list[rm->root_rid]);
5289 recp->n_ext_words = entry->r_group.n_val_pairs;
5290 recp->chain_idx = entry->chain_idx;
5291 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5292 recp->n_grp_count = rm->n_grp_count;
5293 recp->tun_type = rm->tun_type;
5294 recp->recp_created = true;
5309 * ice_create_recipe_group - creates recipe group
5310 * @hw: pointer to hardware structure
5311 * @rm: recipe management list entry
5312 * @lkup_exts: lookup elements
/* NOTE(review): truncated listing — `recp_count` declaration, the status
 * check after ice_create_first_fit_recp_def() and the final return are
 * missing; code left byte-identical.
 *
 * Visible behavior: pack the lookup words into recipe groups (first-fit),
 * accumulate the group count into rm->n_grp_count, and copy the word list
 * and masks from lkup_exts into rm.
 */
5314 static enum ice_status
5315 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5316 struct ice_prot_lkup_ext *lkup_exts)
5318 enum ice_status status;
5321 rm->n_grp_count = 0;
5323 /* Create recipes for words that are marked not done by packing them
5326 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5327 &rm->rg_list, &recp_count);
5329 rm->n_grp_count += recp_count;
5330 rm->n_ext_words = lkup_exts->n_val_words;
5331 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5332 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5333 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5334 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5341 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5342 * @hw: pointer to hardware structure
5343 * @lkups: lookup elements or match criteria for the advanced recipe, one
5344 * structure per protocol header
5345 * @lkups_cnt: number of protocols
5346 * @bm: bitmap of field vectors to consider
5347 * @fv_list: pointer to a list that holds the returned field vectors
/* NOTE(review): truncated listing — `prot_ids` declaration, the `free_mem`
 * label and the final return are missing; code left byte-identical.
 *
 * Visible behavior: map every lookup element's SW protocol type to its HW
 * protocol ID (failing with ICE_ERR_CFG on unknown types), then query the
 * field vectors that cover all of them, restricted by the caller's bitmap.
 * The temporary prot_ids array is freed on all visible paths.
 */
5349 static enum ice_status
5350 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5351 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5353 enum ice_status status;
5357 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5359 return ICE_ERR_NO_MEMORY;
5361 for (i = 0; i < lkups_cnt; i++)
5362 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5363 status = ICE_ERR_CFG;
5367 /* Find field vectors that include all specified protocol types */
5368 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5371 ice_free(hw, prot_ids);
5376 * ice_add_special_words - Add words that are not protocols, such as metadata
5377 * @rinfo: other information regarding the rule e.g. priority and action info
5378 * @lkup_exts: lookup word structure
/* NOTE(review): truncated listing — the ICE_SUCCESS return, the `else`
 * keyword before the ICE_ERR_MAX_LIMIT return and closing braces are
 * missing; code left byte-identical.
 *
 * Visible behavior: for tunneled rules, append one extra lookup word that
 * matches the tunnel bit in the packet-metadata flags (protocol
 * ICE_META_DATA_ID_HW, offset derived from ICE_TUN_FLAG_MDID, mask
 * ICE_TUN_FLAG_MASK), bounded by ICE_MAX_CHAIN_WORDS.
 */
5380 static enum ice_status
5381 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5382 struct ice_prot_lkup_ext *lkup_exts)
5384 /* If this is a tunneled packet, then add recipe index to match the
5385 * tunnel bit in the packet metadata flags.
5387 if (rinfo->tun_type != ICE_NON_TUN) {
5388 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5389 u8 word = lkup_exts->n_val_words++;
5391 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5392 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5394 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5396 return ICE_ERR_MAX_LIMIT;
5403 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5404 * @hw: pointer to hardware structure
5405 * @rinfo: other information regarding the rule e.g. priority and action info
5406 * @bm: pointer to memory for returning the bitmap of field vectors
/* NOTE(review): truncated listing — the return type / `bm` parameter line,
 * the ICE_NON_TUN case label, `break` statements and the `default` label are
 * missing; code left byte-identical.
 *
 * Visible behavior: translate the rule's tunnel type into a profile type and
 * delegate to ice_get_sw_fv_bitmap() to fill the caller's bitmap of
 * compatible field vectors.
 */
5409 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5412 enum ice_prof_type type;
5414 switch (rinfo->tun_type) {
5416 type = ICE_PROF_NON_TUN;
5418 case ICE_ALL_TUNNELS:
5419 type = ICE_PROF_TUN_ALL;
5421 case ICE_SW_TUN_VXLAN_GPE:
5422 case ICE_SW_TUN_GENEVE:
5423 case ICE_SW_TUN_VXLAN:
5424 case ICE_SW_TUN_UDP:
5425 case ICE_SW_TUN_GTP:
5426 type = ICE_PROF_TUN_UDP;
5428 case ICE_SW_TUN_NVGRE:
5429 type = ICE_PROF_TUN_GRE;
5431 case ICE_SW_TUN_PPPOE:
5432 type = ICE_PROF_TUN_PPPOE;
5434 case ICE_SW_TUN_AND_NON_TUN:
5436 type = ICE_PROF_ALL;
5440 ice_get_sw_fv_bitmap(hw, type, bm);
5444 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5445 * @hw: pointer to hardware structure
5446 * @lkups: lookup elements or match criteria for the advanced recipe, one
5447 * structure per protocol header
5448 * @lkups_cnt: number of protocols
5449 * @rinfo: other information regarding the rule e.g. priority and action info
5450 * @rid: return the recipe ID of the recipe created
/* NOTE(review): truncated listing — loop counters, several status checks,
 * `goto err_unroll`-style jumps, cleanup labels and closing braces are
 * missing; code left byte-identical.
 *
 * Visible flow: validate/collect the match words from `lkups` into
 * lkup_exts; build an ice_sw_recipe (rm) with compatible field vectors and
 * grouped match words; add the tunnel metadata word; reuse an existing
 * matching recipe if ice_find_recp() finds one, otherwise create one via
 * ice_add_sw_recipe(); finally associate the new recipe bitmap with every
 * profile (FW AQ calls under the change lock) and mirror the association
 * into the profile_to_recipe / recipe_to_profile tables. Cleanup frees
 * rg_list entries, fv_list entries, rm->root_buf and lkup_exts.
 */
5452 static enum ice_status
5453 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5454 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5456 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5457 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5458 struct ice_prot_lkup_ext *lkup_exts;
5459 struct ice_recp_grp_entry *r_entry;
5460 struct ice_sw_fv_list_entry *fvit;
5461 struct ice_recp_grp_entry *r_tmp;
5462 struct ice_sw_fv_list_entry *tmp;
5463 enum ice_status status = ICE_SUCCESS;
5464 struct ice_sw_recipe *rm;
5465 bool match_tun = false;
5469 return ICE_ERR_PARAM;
5471 lkup_exts = (struct ice_prot_lkup_ext *)
5472 ice_malloc(hw, sizeof(*lkup_exts));
5474 return ICE_ERR_NO_MEMORY;
5476 /* Determine the number of words to be matched and if it exceeds a
5477 * recipe's restrictions
5479 for (i = 0; i < lkups_cnt; i++) {
5482 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5483 status = ICE_ERR_CFG;
5484 goto err_free_lkup_exts;
5487 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5489 status = ICE_ERR_CFG;
5490 goto err_free_lkup_exts;
5494 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5496 status = ICE_ERR_NO_MEMORY;
5497 goto err_free_lkup_exts;
5500 /* Get field vectors that contain fields extracted from all the protocol
5501 * headers being programmed.
5503 INIT_LIST_HEAD(&rm->fv_list);
5504 INIT_LIST_HEAD(&rm->rg_list);
5506 /* Get bitmap of field vectors (profiles) that are compatible with the
5507 * rule request; only these will be searched in the subsequent call to
5510 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5512 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5516 /* Group match words into recipes using preferred recipe grouping
5519 status = ice_create_recipe_group(hw, rm, lkup_exts);
5523 /* There is only profile for UDP tunnels. So, it is necessary to use a
5524 * metadata ID flag to differentiate different tunnel types. A separate
5525 * recipe needs to be used for the metadata.
5527 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5528 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5529 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5532 /* set the recipe priority if specified */
5533 rm->priority = rinfo->priority ? rinfo->priority : 0;
5535 /* Find offsets from the field vector. Pick the first one for all the
5538 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5542 /* get bitmap of all profiles the recipe will be associated with */
5543 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5544 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5546 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5547 ice_set_bit((u16)fvit->profile_id, profiles);
5550 /* Create any special protocol/offset pairs, such as looking at tunnel
5551 * bits by extracting metadata
5553 status = ice_add_special_words(rinfo, lkup_exts);
5555 goto err_free_lkup_exts;
5557 /* Look for a recipe which matches our requested fv / mask list */
5558 *rid = ice_find_recp(hw, lkup_exts);
5559 if (*rid < ICE_MAX_NUM_RECIPES)
5560 /* Success if found a recipe that match the existing criteria */
5563 /* Recipe we need does not exist, add a recipe */
5564 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5568 /* Associate all the recipes created with all the profiles in the
5569 * common field vector.
5571 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5573 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5576 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5577 (u8 *)r_bitmap, NULL);
5581 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5582 ICE_MAX_NUM_RECIPES);
5583 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5587 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5590 ice_release_change_lock(hw);
5595 /* Update profile to recipe bitmap array */
5596 ice_memcpy(profile_to_recipe[fvit->profile_id], r_bitmap,
5597 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
5599 /* Update recipe to profile bitmap array */
5600 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5601 if (ice_is_bit_set(r_bitmap, j))
5602 ice_set_bit((u16)fvit->profile_id,
5603 recipe_to_profile[j]);
5606 *rid = rm->root_rid;
5607 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5608 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Unified cleanup (labels not visible): free recipe group entries, field
 * vector list entries, the recipe's root buffer and the lookup extension.
 */
5610 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5611 ice_recp_grp_entry, l_entry) {
5612 LIST_DEL(&r_entry->l_entry);
5613 ice_free(hw, r_entry);
5616 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5618 LIST_DEL(&fvit->list_entry);
5623 ice_free(hw, rm->root_buf);
5628 ice_free(hw, lkup_exts);
5634 * ice_find_dummy_packet - find dummy packet by tunnel type
5636 * @lkups: lookup elements or match criteria for the advanced recipe, one
5637 * structure per protocol header
5638 * @lkups_cnt: number of protocols
5639 * @tun_type: tunnel type from the match criteria
5640 * @pkt: dummy packet to fill according to filter match criteria
5641 * @pkt_len: packet length of dummy packet
5642 * @offsets: pointer to receive the pointer to the offsets for the packet
/* NOTE(review): truncated listing — the `pkt_len` parameter line, `return`
 * statements between cases, flag assignments (udp/tcp/ipv6 = true) and
 * closing braces are missing; code left byte-identical.
 *
 * Visible behavior: select the dummy packet template (and its offsets table)
 * matching the tunnel type, falling back to inspecting the lookup elements
 * for UDP/TCP/IPv6 to choose between the plain L3/L4 templates. Order
 * matters: explicit tunnel types (GTP, PPPoE, ALL, NVGRE, UDP tunnels) are
 * handled before the generic IPv4/IPv6 TCP/UDP fallbacks.
 */
5645 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5646 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5648 const struct ice_dummy_pkt_offsets **offsets)
5650 bool tcp = false, udp = false, ipv6 = false;
5653 if (tun_type == ICE_SW_TUN_GTP) {
5654 *pkt = dummy_udp_gtp_packet;
5655 *pkt_len = sizeof(dummy_udp_gtp_packet);
5656 *offsets = dummy_udp_gtp_packet_offsets;
5659 if (tun_type == ICE_SW_TUN_PPPOE) {
5660 *pkt = dummy_pppoe_packet;
5661 *pkt_len = sizeof(dummy_pppoe_packet);
5662 *offsets = dummy_pppoe_packet_offsets;
5665 for (i = 0; i < lkups_cnt; i++) {
5666 if (lkups[i].type == ICE_UDP_ILOS)
5668 else if (lkups[i].type == ICE_TCP_IL)
5670 else if (lkups[i].type == ICE_IPV6_OFOS)
5674 if (tun_type == ICE_ALL_TUNNELS) {
5675 *pkt = dummy_gre_udp_packet;
5676 *pkt_len = sizeof(dummy_gre_udp_packet);
5677 *offsets = dummy_gre_udp_packet_offsets;
5681 if (tun_type == ICE_SW_TUN_NVGRE) {
5683 *pkt = dummy_gre_tcp_packet;
5684 *pkt_len = sizeof(dummy_gre_tcp_packet);
5685 *offsets = dummy_gre_tcp_packet_offsets;
5689 *pkt = dummy_gre_udp_packet;
5690 *pkt_len = sizeof(dummy_gre_udp_packet);
5691 *offsets = dummy_gre_udp_packet_offsets;
5695 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5696 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5698 *pkt = dummy_udp_tun_tcp_packet;
5699 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5700 *offsets = dummy_udp_tun_tcp_packet_offsets;
5704 *pkt = dummy_udp_tun_udp_packet;
5705 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5706 *offsets = dummy_udp_tun_udp_packet_offsets;
5711 *pkt = dummy_udp_packet;
5712 *pkt_len = sizeof(dummy_udp_packet);
5713 *offsets = dummy_udp_packet_offsets;
5715 } else if (udp && ipv6) {
5716 *pkt = dummy_udp_ipv6_packet;
5717 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5718 *offsets = dummy_udp_ipv6_packet_offsets;
5720 } else if ((tcp && ipv6) || ipv6) {
5721 *pkt = dummy_tcp_ipv6_packet;
5722 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5723 *offsets = dummy_tcp_ipv6_packet_offsets;
5727 *pkt = dummy_tcp_packet;
5728 *pkt_len = sizeof(dummy_tcp_packet);
5729 *offsets = dummy_tcp_packet_offsets;
5733 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5735 * @lkups: lookup elements or match criteria for the advanced recipe, one
5736 * structure per protocol header
5737 * @lkups_cnt: number of protocols
5738 * @s_rule: stores rule information from the match criteria
5739 * @dummy_pkt: dummy packet to fill according to filter match criteria
5740 * @pkt_len: packet length of dummy packet
5741 * @offsets: offset info for the dummy packet
5743 static enum ice_status
5744 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5745 struct ice_aqc_sw_rules_elem *s_rule,
5746 const u8 *dummy_pkt, u16 pkt_len,
5747 const struct ice_dummy_pkt_offsets *offsets)
/* Writes the dummy packet into s_rule->pdata.lkup_tx_rx.hdr, then patches in
 * the caller's header values under their masks, word (u16) at a time.
 * Returns ICE_ERR_PARAM on an unknown lookup type or a lookup whose offset is
 * not found in @offsets. (Some lines are elided from this listing.)
 */
5752 /* Start with a packet with a pre-defined/dummy content. Then, fill
5753 * in the header values to be looked up or matched.
5755 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5757 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5759 for (i = 0; i < lkups_cnt; i++) {
5760 enum ice_protocol_type type;
5761 u16 offset = 0, len = 0, j;
5764 /* find the start of this layer; it should be found since this
5765 * was already checked when search for the dummy packet
5767 type = lkups[i].type;
5768 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5769 if (type == offsets[j].type) {
5770 offset = offsets[j].offset;
5775 /* this should never happen in a correct calling sequence */
5777 return ICE_ERR_PARAM;
/* map the lookup type to the length of the header that may be patched;
 * unknown types reject the whole rule (case labels elided from this view)
 */
5779 switch (lkups[i].type) {
5782 len = sizeof(struct ice_ether_hdr);
5785 len = sizeof(struct ice_ethtype_hdr);
5788 len = sizeof(struct ice_vlan_hdr);
5792 len = sizeof(struct ice_ipv4_hdr);
5796 len = sizeof(struct ice_ipv6_hdr);
5801 len = sizeof(struct ice_l4_hdr);
5804 len = sizeof(struct ice_sctp_hdr);
5807 len = sizeof(struct ice_nvgre);
5812 len = sizeof(struct ice_udp_tnl_hdr);
5816 len = sizeof(struct ice_udp_gtp_hdr);
5819 len = sizeof(struct ice_pppoe_hdr);
5822 return ICE_ERR_PARAM;
5825 /* the length should be a word multiple */
5826 if (len % ICE_BYTES_PER_WORD)
5829 /* We have the offset to the header start, the length, the
5830 * caller's header values and mask. Use this information to
5831 * copy the data into the dummy packet appropriately based on
5832 * the mask. Note that we need to only write the bits as
5833 * indicated by the mask to make sure we don't improperly write
5834 * over any significant packet data.
/* word-wise masked merge: keep dummy bits where the mask is 0, take the
 * caller's header bits where the mask is 1; all-zero mask words are skipped
 */
5836 for (j = 0; j < len / sizeof(u16); j++)
5837 if (((u16 *)&lkups[i].m_u)[j])
5838 ((u16 *)(pkt + offset))[j] =
5839 (((u16 *)(pkt + offset))[j] &
5840 ~((u16 *)&lkups[i].m_u)[j]) |
5841 (((u16 *)&lkups[i].h_u)[j] &
5842 ((u16 *)&lkups[i].m_u)[j]);
/* record the (LE16) length of the programmed dummy header in the rule */
5845 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5851 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5852 * @hw: pointer to the hardware structure
5853 * @tun_type: tunnel type
5854 * @pkt: dummy packet to fill in
5855 * @offsets: offset info for the dummy packet
5857 static enum ice_status
5858 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5859 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Looks up the currently-open VXLAN/GENEVE tunnel port and writes it into the
 * outer UDP destination port of the already-built dummy packet.
 */
5864 case ICE_SW_TUN_AND_NON_TUN:
5865 case ICE_SW_TUN_VXLAN_GPE:
5866 case ICE_SW_TUN_VXLAN:
5867 case ICE_SW_TUN_UDP:
5868 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5872 case ICE_SW_TUN_GENEVE:
5873 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5878 /* Nothing needs to be done for this tunnel type */
5882 /* Find the outer UDP protocol header and insert the port number */
5883 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5884 if (offsets[i].type == ICE_UDP_OF) {
5885 struct ice_l4_hdr *hdr;
5888 offset = offsets[i].offset;
5889 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* 16-bit byte swap — presumably stores the port big-endian (network
 * order) in the wire header; TODO confirm dst_port is a __be16 field
 */
5890 hdr->dst_port = open_port << 8 | open_port >> 8;
5900 * ice_find_adv_rule_entry - Search a rule entry
5901 * @hw: pointer to the hardware structure
5902 * @lkups: lookup elements or match criteria for the advanced recipe, one
5903 * structure per protocol header
5904 * @lkups_cnt: number of protocols
5905 * @recp_id: recipe ID for which we are finding the rule
5906 * @rinfo: other information regarding the rule e.g. priority and action info
5908 * Helper function to search for a given advance rule entry
5909 * Returns pointer to entry storing the rule if found
5911 static struct ice_adv_fltr_mgmt_list_entry *
5912 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5913 u16 lkups_cnt, u8 recp_id,
5914 struct ice_adv_rule_info *rinfo)
5916 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5917 struct ice_switch_info *sw = hw->switch_info;
/* walk the filter list of the given recipe, matching on lookup count,
 * byte-wise lookup contents, switch-action flag, and tunnel type
 */
5920 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5921 ice_adv_fltr_mgmt_list_entry, list_entry) {
5922 bool lkups_matched = true;
5924 if (lkups_cnt != list_itr->lkups_cnt)
/* memcmp requires identical element ordering, not just equal content */
5926 for (i = 0; i < list_itr->lkups_cnt; i++)
5927 if (memcmp(&list_itr->lkups[i], &lkups[i],
5929 lkups_matched = false;
5932 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5933 rinfo->tun_type == list_itr->rule_info.tun_type &&
5941 * ice_adv_add_update_vsi_list
5942 * @hw: pointer to the hardware structure
5943 * @m_entry: pointer to current adv filter management list entry
5944 * @cur_fltr: filter information from the book keeping entry
5945 * @new_fltr: filter information with the new VSI to be added
5947 * Call AQ command to add or update previously created VSI list with new VSI.
5949 * Helper function to do book keeping associated with adding filter information
5950 * The algorithm to do the booking keeping is described below :
5951 * When a VSI needs to subscribe to a given advanced filter
5952 * if only one VSI has been added till now
5953 * Allocate a new VSI list and add two VSIs
5954 * to this list using switch rule command
5955 * Update the previously created switch rule with the
5956 * newly created VSI list ID
5957 * if a VSI list was previously created
5958 * Add the new VSI to the previously created VSI list set
5959 * using the update switch rule command
5961 static enum ice_status
5962 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5963 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5964 struct ice_adv_rule_info *cur_fltr,
5965 struct ice_adv_rule_info *new_fltr)
5967 enum ice_status status;
5968 u16 vsi_list_id = 0;
/* queue/queue-group forwarding cannot be folded into a VSI list */
5970 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5971 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5972 return ICE_ERR_NOT_IMPL;
/* drop rules are VSI-independent; a second drop is a duplicate */
5974 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5975 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5976 return ICE_ERR_ALREADY_EXISTS;
5978 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5979 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5980 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5981 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5982 return ICE_ERR_NOT_IMPL;
5984 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5985 /* Only one entry existed in the mapping and it was not already
5986 * a part of a VSI list. So, create a VSI list with the old and
5989 struct ice_fltr_info tmp_fltr;
5990 u16 vsi_handle_arr[2];
5992 /* A rule already exists with the new VSI being added */
5993 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5994 new_fltr->sw_act.fwd_id.hw_vsi_id)
5995 return ICE_ERR_ALREADY_EXISTS;
5997 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5998 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5999 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6005 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6006 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6007 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6008 /* Update the previous switch rule of "forward to VSI" to
6011 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* keep the bookkeeping entry in sync with the HW rule just rewritten */
6015 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6016 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6017 m_entry->vsi_list_info =
6018 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6021 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6023 if (!m_entry->vsi_list_info)
6026 /* A rule already exists with the new VSI being added */
6027 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6030 /* Update the previously created VSI list set with
6031 * the new VSI ID passed in
6033 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6035 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6037 ice_aqc_opc_update_sw_rules,
6039 /* update VSI list mapping info with new VSI ID */
6041 ice_set_bit(vsi_handle,
6042 m_entry->vsi_list_info->vsi_map);
6045 m_entry->vsi_count++;
6050 * ice_add_adv_rule - helper function to create an advanced switch rule
6051 * @hw: pointer to the hardware structure
6052 * @lkups: information on the words that needs to be looked up. All words
6053 * together makes one recipe
6054 * @lkups_cnt: num of entries in the lkups array
6055 * @rinfo: other information related to the rule that needs to be programmed
6056 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6057 * ignored in case of error.
6059 * This function can program only 1 rule at a time. The lkups is used to
6060 * describe the all the words that forms the "lookup" portion of the recipe.
6061 * These words can span multiple protocols. Callers to this function need to
6062 * pass in a list of protocol headers with lookup information along and mask
6063 * that determines which words are valid from the given protocol header.
6064 * rinfo describes other information related to this rule such as forwarding
6065 * IDs, priority of this rule, etc.
6068 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6069 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6070 struct ice_rule_query_data *added_entry)
/* NOTE(review): elided extraction — several lines (error checks, cleanup
 * labels' guards, a few local declarations) are missing from this view.
 */
6072 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6073 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6074 const struct ice_dummy_pkt_offsets *pkt_offsets;
6075 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6076 struct LIST_HEAD_TYPE *rule_head;
6077 struct ice_switch_info *sw;
6078 enum ice_status status;
6079 const u8 *pkt = NULL;
6085 return ICE_ERR_PARAM;
6087 /* get # of words we need to match */
6089 for (i = 0; i < lkups_cnt; i++) {
6092 ptr = (u16 *)&lkups[i].m_u;
6093 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* rules with no valid mask words, or more than the chaining limit, fail */
6097 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6098 return ICE_ERR_PARAM;
6100 /* make sure that we can locate a dummy packet */
6101 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6104 status = ICE_ERR_PARAM;
6105 goto err_ice_add_adv_rule;
/* only VSI/queue/queue-group forwarding and drop are supported actions */
6108 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6109 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6110 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6111 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6114 vsi_handle = rinfo->sw_act.vsi_handle;
6115 if (!ice_is_vsi_valid(hw, vsi_handle))
6116 return ICE_ERR_PARAM;
/* resolve the caller's logical VSI handle to the HW VSI number */
6118 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6119 rinfo->sw_act.fwd_id.hw_vsi_id =
6120 ice_get_hw_vsi_num(hw, vsi_handle);
6121 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6122 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6124 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* if an identical rule already exists, subscribe this VSI to it instead of
 * programming a duplicate HW rule
 */
6127 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6129 /* we have to add VSI to VSI_LIST and increment vsi_count.
6130 * Also Update VSI list so that we can change forwarding rule
6131 * if the rule already exists, we will check if it exists with
6132 * same vsi_id, if not then add it to the VSI list if it already
6133 * exists if not then create a VSI list and add the existing VSI
6134 * ID and the new VSI ID to the list
6135 * We will add that VSI to the list
6137 status = ice_adv_add_update_vsi_list(hw, m_entry,
6138 &m_entry->rule_info,
6141 added_entry->rid = rid;
6142 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6143 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* no existing rule: build a new lookup rule buffer sized for the dummy pkt */
6147 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6148 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6150 return ICE_ERR_NO_MEMORY;
/* compose the single-action word for the requested filter action */
6151 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6152 switch (rinfo->sw_act.fltr_act) {
6153 case ICE_FWD_TO_VSI:
6154 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6155 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6156 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6159 act |= ICE_SINGLE_ACT_TO_Q;
6160 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6161 ICE_SINGLE_ACT_Q_INDEX_M;
6163 case ICE_FWD_TO_QGRP:
/* queue region size is encoded as log2 of the queue-group size */
6164 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6165 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6166 act |= ICE_SINGLE_ACT_TO_Q;
6167 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6168 ICE_SINGLE_ACT_Q_INDEX_M;
6169 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6170 ICE_SINGLE_ACT_Q_REGION_M;
6172 case ICE_DROP_PACKET:
6173 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6174 ICE_SINGLE_ACT_VALID_BIT;
6177 status = ICE_ERR_CFG;
6178 goto err_ice_add_adv_rule;
6181 /* set the rule LOOKUP type based on caller specified 'RX'
6182 * instead of hardcoding it to be either LOOKUP_TX/RX
6184 * for 'RX' set the source to be the port number
6185 * for 'TX' set the source to be the source HW VSI number (determined
6189 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6190 s_rule->pdata.lkup_tx_rx.src =
6191 CPU_TO_LE16(hw->port_info->lport);
6193 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6194 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6197 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6198 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* embed the dummy packet (patched with the match criteria) in the rule */
6200 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6201 pkt_len, pkt_offsets);
6203 goto err_ice_add_adv_rule;
/* tunnel rules additionally need the open tunnel UDP port patched in */
6205 if (rinfo->tun_type != ICE_NON_TUN) {
6206 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6207 s_rule->pdata.lkup_tx_rx.hdr,
6210 goto err_ice_add_adv_rule;
/* program the rule in HW via the switch-rules AQ command */
6213 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6214 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6217 goto err_ice_add_adv_rule;
/* build the bookkeeping entry; lkups are deep-copied so the caller's array
 * need not outlive this call
 */
6218 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6219 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6221 status = ICE_ERR_NO_MEMORY;
6222 goto err_ice_add_adv_rule;
6225 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6226 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6227 ICE_NONDMA_TO_NONDMA);
6228 if (!adv_fltr->lkups) {
6229 status = ICE_ERR_NO_MEMORY;
6230 goto err_ice_add_adv_rule;
6233 adv_fltr->lkups_cnt = lkups_cnt;
6234 adv_fltr->rule_info = *rinfo;
/* FW returns the assigned rule index in the same buffer */
6235 adv_fltr->rule_info.fltr_rule_id =
6236 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6237 sw = hw->switch_info;
6238 sw->recp_list[rid].adv_rule = true;
6239 rule_head = &sw->recp_list[rid].filt_rules;
6241 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6242 struct ice_fltr_info tmp_fltr;
6244 tmp_fltr.fltr_rule_id =
6245 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6246 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6247 tmp_fltr.fwd_id.hw_vsi_id =
6248 ice_get_hw_vsi_num(hw, vsi_handle);
6249 tmp_fltr.vsi_handle = vsi_handle;
6250 /* Update the previous switch rule of "forward to VSI" to
6253 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6255 goto err_ice_add_adv_rule;
6256 adv_fltr->vsi_count = 1;
6259 /* Add rule entry to book keeping list */
6260 LIST_ADD(&adv_fltr->list_entry, rule_head);
6262 added_entry->rid = rid;
6263 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6264 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* unified cleanup: free the partially-built bookkeeping entry on failure;
 * the AQ rule buffer is always freed
 */
6266 err_ice_add_adv_rule:
6267 if (status && adv_fltr) {
6268 ice_free(hw, adv_fltr->lkups);
6269 ice_free(hw, adv_fltr);
6272 ice_free(hw, s_rule);
6278 * ice_adv_rem_update_vsi_list
6279 * @hw: pointer to the hardware structure
6280 * @vsi_handle: VSI handle of the VSI to remove
6281 * @fm_list: filter management entry for which the VSI list management needs to
6284 static enum ice_status
6285 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6286 struct ice_adv_fltr_mgmt_list_entry *fm_list)
/* Removes @vsi_handle from the VSI list behind an advanced rule. When only
 * one VSI remains afterwards, the rule is converted back to plain
 * "forward to VSI" and the now-unneeded VSI list is deleted.
 */
6288 struct ice_vsi_list_map_info *vsi_list_info;
6289 enum ice_sw_lkup_type lkup_type;
6290 enum ice_status status;
6293 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6294 fm_list->vsi_count == 0)
6295 return ICE_ERR_PARAM;
6297 /* A rule with the VSI being removed does not exist */
6298 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6299 return ICE_ERR_DOES_NOT_EXIST;
6301 lkup_type = ICE_SW_LKUP_LAST;
6302 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* true => remove the VSI from the HW VSI list */
6303 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6304 ice_aqc_opc_update_sw_rules,
6309 fm_list->vsi_count--;
6310 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6311 vsi_list_info = fm_list->vsi_list_info;
6312 if (fm_list->vsi_count == 1) {
6313 struct ice_fltr_info tmp_fltr;
/* find the single VSI left on the list; it becomes the direct target */
6316 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6318 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6319 return ICE_ERR_OUT_OF_RANGE;
6321 /* Make sure VSI list is empty before removing it below */
6322 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6324 ice_aqc_opc_update_sw_rules,
6328 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6329 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6330 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6331 tmp_fltr.fwd_id.hw_vsi_id =
6332 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6333 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6334 ice_get_hw_vsi_num(hw, rem_vsi_handle);
/* NOTE(review): comment below states the reverse direction; the code
 * converts "fwd to VSI list" back to "fwd to VSI"
 */
6336 /* Update the previous switch rule of "MAC forward to VSI" to
6337 * "MAC fwd to VSI list"
6339 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6341 ice_debug(hw, ICE_DBG_SW,
6342 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6343 tmp_fltr.fwd_id.hw_vsi_id, status);
6347 /* Remove the VSI list since it is no longer used */
6348 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6350 ice_debug(hw, ICE_DBG_SW,
6351 "Failed to remove VSI list %d, error %d\n",
6352 vsi_list_id, status);
/* drop the SW map entry that tracked the deleted HW VSI list */
6356 LIST_DEL(&vsi_list_info->list_entry);
6357 ice_free(hw, vsi_list_info);
6358 fm_list->vsi_list_info = NULL;
6365 * ice_rem_adv_rule - removes existing advanced switch rule
6366 * @hw: pointer to the hardware structure
6367 * @lkups: information on the words that needs to be looked up. All words
6368 * together makes one recipe
6369 * @lkups_cnt: num of entries in the lkups array
6370 * @rinfo: Its the pointer to the rule information for the rule
6372 * This function can be used to remove 1 rule at a time. The lkups is
6373 * used to describe all the words that forms the "lookup" portion of the
6374 * rule. These words can span multiple protocols. Callers to this function
6375 * need to pass in a list of protocol headers with lookup information along
6376 * and mask that determines which words are valid from the given protocol
6377 * header. rinfo describes other information related to this rule such as
6378 * forwarding IDs, priority of this rule, etc.
6381 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6382 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6384 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6385 struct ice_prot_lkup_ext lkup_exts;
6386 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6387 enum ice_status status = ICE_SUCCESS;
6388 bool remove_rule = false;
6389 u16 i, rid, vsi_handle;
/* rebuild the protocol/offset word extraction from the caller's lookups so
 * the matching recipe can be located
 */
6391 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6392 for (i = 0; i < lkups_cnt; i++) {
6395 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6398 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6403 /* Create any special protocol/offset pairs, such as looking at tunnel
6404 * bits by extracting metadata
6406 status = ice_add_special_words(rinfo, &lkup_exts);
6410 rid = ice_find_recp(hw, &lkup_exts);
6411 /* If did not find a recipe that match the existing criteria */
6412 if (rid == ICE_MAX_NUM_RECIPES)
6413 return ICE_ERR_PARAM;
6415 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6416 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6417 /* the rule is already removed */
/* decide under the rule lock whether to delete the HW rule outright or just
 * detach this VSI from the rule's VSI list
 */
6420 ice_acquire_lock(rule_lock);
6421 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6423 } else if (list_elem->vsi_count > 1) {
6424 list_elem->vsi_list_info->ref_cnt--;
6425 remove_rule = false;
6426 vsi_handle = rinfo->sw_act.vsi_handle;
6427 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6429 vsi_handle = rinfo->sw_act.vsi_handle;
6430 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6432 ice_release_lock(rule_lock);
6435 if (list_elem->vsi_count == 0)
6438 ice_release_lock(rule_lock);
/* delete the HW rule by index; no dummy header needed for removal */
6440 struct ice_aqc_sw_rules_elem *s_rule;
6443 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6445 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6448 return ICE_ERR_NO_MEMORY;
6449 s_rule->pdata.lkup_tx_rx.act = 0;
6450 s_rule->pdata.lkup_tx_rx.index =
6451 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6452 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6453 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6455 ice_aqc_opc_remove_sw_rules, NULL);
/* only drop the bookkeeping entry once HW confirmed the removal */
6456 if (status == ICE_SUCCESS) {
6457 ice_acquire_lock(rule_lock);
6458 LIST_DEL(&list_elem->list_entry);
6459 ice_free(hw, list_elem->lkups);
6460 ice_free(hw, list_elem);
6461 ice_release_lock(rule_lock);
6463 ice_free(hw, s_rule);
6469 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6470 * @hw: pointer to the hardware structure
6471 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6473 * This function is used to remove 1 rule at a time. The removal is based on
6474 * the remove_entry parameter. This function will remove rule for a given
6475 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6478 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6479 struct ice_rule_query_data *remove_entry)
6481 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6482 struct LIST_HEAD_TYPE *list_head;
6483 struct ice_adv_rule_info rinfo;
6484 struct ice_switch_info *sw;
6486 sw = hw->switch_info;
/* reject removal against a recipe that was never created */
6487 if (!sw->recp_list[remove_entry->rid].recp_created)
6488 return ICE_ERR_PARAM;
6489 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* resolve rule_id to its bookkeeping entry, then delegate to the
 * lookup-based removal path with the caller's VSI handle substituted in
 */
6490 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6492 if (list_itr->rule_info.fltr_rule_id ==
6493 remove_entry->rule_id) {
6494 rinfo = list_itr->rule_info;
6495 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6496 return ice_rem_adv_rule(hw, list_itr->lkups,
6497 list_itr->lkups_cnt, &rinfo);
/* rule_id not found under this recipe */
6500 return ICE_ERR_PARAM;
6504 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6506 * @hw: pointer to the hardware structure
6507 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6509 * This function is used to remove all the rules for a given VSI and as soon
6510 * as removing a rule fails, it will return immediately with the error code,
6511 * else it will return ICE_SUCCESS
6514 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6516 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6517 struct ice_vsi_list_map_info *map_info;
6518 struct LIST_HEAD_TYPE *list_head;
6519 struct ice_adv_rule_info rinfo;
6520 struct ice_switch_info *sw;
6521 enum ice_status status;
6522 u16 vsi_list_id = 0;
6525 sw = hw->switch_info;
/* scan every created recipe that holds advanced rules */
6526 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6527 if (!sw->recp_list[rid].recp_created)
6529 if (!sw->recp_list[rid].adv_rule)
6531 list_head = &sw->recp_list[rid].filt_rules;
6533 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6534 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* only remove rules whose VSI list includes @vsi_handle */
6535 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6539 rinfo = list_itr->rule_info;
6540 rinfo.sw_act.vsi_handle = vsi_handle;
6541 status = ice_rem_adv_rule(hw, list_itr->lkups,
6542 list_itr->lkups_cnt, &rinfo);
6552 * ice_replay_fltr - Replay all the filters stored by a specific list head
6553 * @hw: pointer to the hardware structure
6554 * @list_head: list for which filters needs to be replayed
6555 * @recp_id: Recipe ID for which rules need to be replayed
6557 static enum ice_status
6558 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6560 struct ice_fltr_mgmt_list_entry *itr;
6561 struct LIST_HEAD_TYPE l_head;
6562 enum ice_status status = ICE_SUCCESS;
6564 if (LIST_EMPTY(list_head))
6567 /* Move entries from the given list_head to a temporary l_head so that
6568 * they can be replayed. Otherwise when trying to re-add the same
6569 * filter, the function will return already exists
6571 LIST_REPLACE_INIT(list_head, &l_head);
6573 /* Mark the given list_head empty by reinitializing it so filters
6574 * could be added again by *handler
6576 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6578 struct ice_fltr_list_entry f_entry;
6580 f_entry.fltr_info = itr->fltr_info;
/* single-VSI, non-VLAN filters replay directly via the generic path */
6581 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6582 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6583 if (status != ICE_SUCCESS)
6588 /* Add a filter per VSI separately */
/* VSI-list based filters are expanded back into one filter per member
 * VSI; bits are cleared from the saved map so re-adding can repopulate it
 */
6593 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6595 if (!ice_is_vsi_valid(hw, vsi_handle))
6598 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6599 f_entry.fltr_info.vsi_handle = vsi_handle;
6600 f_entry.fltr_info.fwd_id.hw_vsi_id =
6601 ice_get_hw_vsi_num(hw, vsi_handle);
6602 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters have a dedicated add path */
6603 if (recp_id == ICE_SW_LKUP_VLAN)
6604 status = ice_add_vlan_internal(hw, &f_entry);
6606 status = ice_add_rule_internal(hw, recp_id,
6608 if (status != ICE_SUCCESS)
6613 /* Clear the filter management list */
6614 ice_rem_sw_rule_info(hw, &l_head);
6619 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6620 * @hw: pointer to the hardware structure
6622 * NOTE: This function does not clean up partially added filters on error.
6623 * It is up to caller of the function to issue a reset or fail early.
6625 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6627 struct ice_switch_info *sw = hw->switch_info;
6628 enum ice_status status = ICE_SUCCESS;
/* replay each recipe's filter list in turn; stop at the first failure */
6631 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6632 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6634 status = ice_replay_fltr(hw, i, head);
6635 if (status != ICE_SUCCESS)
6642 * ice_replay_vsi_fltr - Replay filters for requested VSI
6643 * @hw: pointer to the hardware structure
6644 * @vsi_handle: driver VSI handle
6645 * @recp_id: Recipe ID for which rules need to be replayed
6646 * @list_head: list for which filters need to be replayed
6648 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6649 * It is required to pass valid VSI handle.
6651 static enum ice_status
6652 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6653 struct LIST_HEAD_TYPE *list_head)
6655 struct ice_fltr_mgmt_list_entry *itr;
6656 enum ice_status status = ICE_SUCCESS;
6659 if (LIST_EMPTY(list_head))
6661 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6663 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6665 struct ice_fltr_list_entry f_entry;
6667 f_entry.fltr_info = itr->fltr_info;
/* direct (single-VSI, non-VLAN) filter owned by this VSI: replay as-is */
6668 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6669 itr->fltr_info.vsi_handle == vsi_handle) {
6670 /* update the src in case it is VSI num */
6671 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6672 f_entry.fltr_info.src = hw_vsi_id;
6673 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6674 if (status != ICE_SUCCESS)
/* otherwise only replay VSI-list filters that include this VSI */
6678 if (!itr->vsi_list_info ||
6679 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6681 /* Clearing it so that the logic can add it back */
6682 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6683 f_entry.fltr_info.vsi_handle = vsi_handle;
6684 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6685 /* update the src in case it is VSI num */
6686 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6687 f_entry.fltr_info.src = hw_vsi_id;
6688 if (recp_id == ICE_SW_LKUP_VLAN)
6689 status = ice_add_vlan_internal(hw, &f_entry);
6691 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6692 if (status != ICE_SUCCESS)
6700 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6701 * @hw: pointer to the hardware structure
6702 * @vsi_handle: driver VSI handle
6703 * @list_head: list for which filters need to be replayed
6705 * Replay the advanced rule for the given VSI.
6707 static enum ice_status
6708 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6709 struct LIST_HEAD_TYPE *list_head)
6711 struct ice_rule_query_data added_entry = { 0 };
6712 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6713 enum ice_status status = ICE_SUCCESS;
6715 if (LIST_EMPTY(list_head))
/* re-add each saved advanced rule that targets this VSI; others skipped */
6717 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6719 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6720 u16 lk_cnt = adv_fltr->lkups_cnt;
6722 if (vsi_handle != rinfo->sw_act.vsi_handle)
/* added_entry is only needed to satisfy ice_add_adv_rule's API */
6724 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6733 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6734 * @hw: pointer to the hardware structure
6735 * @vsi_handle: driver VSI handle
6737 * Replays filters for requested VSI via vsi_handle.
6739 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6741 struct ice_switch_info *sw = hw->switch_info;
6742 enum ice_status status;
6745 /* Update the recipes that were created */
6746 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6747 struct LIST_HEAD_TYPE *head;
6749 head = &sw->recp_list[i].filt_replay_rules;
/* advanced-rule recipes use the advanced replay path; all others use
 * the legacy per-recipe filter replay
 */
6750 if (!sw->recp_list[i].adv_rule)
6751 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6753 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6754 if (status != ICE_SUCCESS)
6762 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6763 * @hw: pointer to the HW struct
6765 * Deletes the filter replay rules.
6767 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6769 struct ice_switch_info *sw = hw->switch_info;
6775 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6776 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6777 struct LIST_HEAD_TYPE *l_head;
6779 l_head = &sw->recp_list[i].filt_replay_rules;
6780 if (!sw->recp_list[i].adv_rule)
6781 ice_rem_sw_rule_info(hw, l_head);
6783 ice_rem_adv_rule_info(hw, l_head);