1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header below, plus the
 * maximum valid 802.1Q VLAN ID (12-bit field).
 */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
 */
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* NOTE(review): the remaining bytes of the 16-byte initializer and the
 * closing brace are not present in this extract — verify against the
 * complete source before building.
 */
/* Byte sizes of variable-length switch rule AQ buffers.  Each macro takes
 * sizeof the ice_aqc_sw_rules_elem wrapper, subtracts its flexible payload
 * member (pdata), and adds the size of the actual payload variant; the
 * "- 1" accounts for the one-byte placeholder inside the payload struct.
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Lookup RX/TX rule with no dummy packet header attached */
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Large-action rule sized for (n) action entries */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule sized for (n) VSI entries */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* One (protocol, byte-offset) pair describing where a protocol header
 * starts inside a dummy training packet; tables below are terminated by
 * an ICE_PROTOCOL_LAST entry.
 * NOTE(review): the closing "};" of this struct is elided in this extract.
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Offset table and dummy bytes for an NVGRE-encapsulated TCP packet
 * (outer MAC/IPv4, NVGRE, inner MAC/IPv4/TCP).
 * NOTE(review): several offset entries and array terminators are elided
 * in this extract — verify against the complete source.
 */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00, /* data offset 5, SYN, window 0x2000 */
104 0x00, 0x00, 0x00, 0x00
/* Offset table and dummy bytes for an NVGRE-encapsulated UDP packet
 * (outer MAC/IPv4, NVGRE, inner MAC/IPv4/UDP).
 */
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
/* Offset table and dummy bytes for a VXLAN/VXLAN-GPE (UDP tunnel)
 * encapsulated TCP packet; outer UDP dst port 0x12b5 = 4789 (VXLAN).
 */
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
159 { ICE_VXLAN_GPE, 42 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol 0x11 = UDP */
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00, /* TTL 64, protocol 0x06 = TCP */
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00, /* data offset 5, SYN, window 0x2000 */
201 0x00, 0x00, 0x00, 0x00
/* Offset table and dummy bytes for a VXLAN/VXLAN-GPE (UDP tunnel)
 * encapsulated UDP packet; outer UDP dst port 0x12b5 = 4789 (VXLAN).
 */
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
212 { ICE_VXLAN_GPE, 42 },
215 { ICE_UDP_ILOS, 84 },
216 { ICE_PROTOCOL_LAST, 0 },
220 u8 dummy_udp_tun_udp_packet[] = {
221 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x08, 0x00, /* ICE_ETYPE_OL 12 */
227 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
228 0x00, 0x01, 0x00, 0x00,
229 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
230 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
234 0x00, 0x3a, 0x00, 0x00,
236 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
237 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
251 0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
/* Offset table and dummy bytes for a plain (non-tunneled) IPv4/UDP packet */
255 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
257 { ICE_ETYPE_OL, 12 },
258 { ICE_IPV4_OFOS, 14 },
259 { ICE_UDP_ILOS, 34 },
260 { ICE_PROTOCOL_LAST, 0 },
264 dummy_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x08, 0x00, /* ICE_ETYPE_OL 12 */
271 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
272 0x00, 0x01, 0x00, 0x00,
273 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
278 0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
280 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset table and dummy bytes for a plain (non-tunneled) IPv4/TCP packet */
284 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
286 { ICE_ETYPE_OL, 12 },
287 { ICE_IPV4_OFOS, 14 },
289 { ICE_PROTOCOL_LAST, 0 },
293 dummy_tcp_packet[] = {
294 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
295 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00,
298 0x08, 0x00, /* ICE_ETYPE_OL 12 */
300 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
301 0x00, 0x01, 0x00, 0x00,
302 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
303 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset table and dummy bytes for a plain IPv6/TCP packet */
316 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
318 { ICE_ETYPE_OL, 12 },
319 { ICE_IPV6_OFOS, 14 },
321 { ICE_PROTOCOL_LAST, 0 },
325 dummy_tcp_ipv6_packet[] = {
326 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
/* NOTE(review): the offset table places ICE_IPV6_OFOS at byte 14; the
 * inline "40" in the comment below looks inconsistent — confirm against
 * the full source.
 */
332 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
333 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
347 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset table and dummy bytes for a plain IPv6/UDP packet */
353 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
355 { ICE_ETYPE_OL, 12 },
356 { ICE_IPV6_OFOS, 14 },
357 { ICE_UDP_ILOS, 54 },
358 { ICE_PROTOCOL_LAST, 0 },
362 dummy_udp_ipv6_packet[] = {
363 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
364 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
367 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
381 0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
383 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offset table and dummy bytes for a GTP-U packet (UDP dst port
 * 0x0868 = 2152), including a PDU session extension header.
 */
387 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
389 { ICE_IPV4_OFOS, 14 },
392 { ICE_PROTOCOL_LAST, 0 },
396 dummy_udp_gtp_packet[] = {
397 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
402 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
409 0x00, 0x1c, 0x00, 0x00,
411 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
412 0x00, 0x00, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x85,
415 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
416 0x00, 0x00, 0x00, 0x00,
/* Offset table and dummy bytes for a PPPoE session packet
 * (ethertype 0x8864) carrying an IPv4 PDU.
 */
420 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_VLAN_OFOS, 14},
424 { ICE_PROTOCOL_LAST, 0 },
428 dummy_pppoe_packet[] = {
429 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
436 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 18 */
437 0x00, 0x4e, 0x00, 0x21, /* PPP protocol 0x0021 = IPv4 */
439 0x45, 0x00, 0x00, 0x30, /* PDU */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x11, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
446 /* this is a recipe to profile association bitmap */
447 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
448 ICE_MAX_NUM_PROFILES);
450 /* this is a profile to recipe association bitmap */
451 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
452 ICE_MAX_NUM_RECIPES);
/* forward declaration — defined later in this file */
454 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/**
457 * ice_collect_result_idx - copy result index values
458 * @buf: buffer that contains the result index
459 * @recp: the recipe struct to copy data into
 */
461 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
462 struct ice_sw_recipe *recp)
/* Record the FW-provided result index in the recipe's result-index
 * bitmap, masking off the RESULT_EN flag bit first.
 */
464 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
465 ice_set_bit(buf->content.result_indx &
466 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
/**
470 * ice_init_possible_res_bm - initialize possible result bitmap
471 * @pos_result_bm: pointer to the bitmap to initialize
 */
473 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
/* Set a bit for every field-vector word that may hold a result index,
 * as encoded by the ICE_POSSIBLE_RES_IDX mask.
 * NOTE(review): the declaration of the loop variable "bit" is elided in
 * this extract — verify against the complete source.
 */
477 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
479 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
480 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
481 ice_set_bit(bit, pos_result_bm);
/**
485 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
486 * @hw: pointer to hardware structure
487 * @recps: struct that we need to populate
488 * @rid: recipe ID that we are populating
489 * @refresh_required: true if we should get recipe to profile mapping from FW
491 * This function is used to populate all the necessary entries into our
492 * bookkeeping so that we have a current list of all the recipes that are
493 * programmed in the firmware.
 */
495 static enum ice_status
496 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
497 bool *refresh_required)
/* NOTE(review): several structural lines (braces, NULL checks, error-path
 * labels that free "tmp"/"rg_entry") are elided in this extract — verify
 * the error handling against the complete source.
 */
499 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
500 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
501 struct ice_aqc_recipe_data_elem *tmp;
502 u16 num_recps = ICE_MAX_NUM_RECIPES;
503 struct ice_prot_lkup_ext *lkup_exts;
504 u16 i, sub_recps, fv_word_idx = 0;
505 enum ice_status status;
507 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
508 ice_init_possible_res_bm(possible_idx);
510 /* we need a buffer big enough to accommodate all the recipes */
511 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
512 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
514 return ICE_ERR_NO_MEMORY;
516 tmp[0].recipe_indx = rid;
517 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
518 /* non-zero status meaning recipe doesn't exist */
522 /* Get recipe to profile map so that we can get the fv from lkups that
523 * we read for a recipe from FW. Since we want to minimize the number of
524 * times we make this FW call, just make one call and cache the copy
525 * until a new recipe is added. This operation is only required the
526 * first time to get the changes from FW. Then to search existing
527 * entries we don't need to update the cache again until another recipe
530 if (*refresh_required) {
531 ice_get_recp_to_prof_map(hw);
532 *refresh_required = false;
535 /* Start populating all the entries for recps[rid] based on lkups from
536 * firmware. Note that we are only creating the root recipe in our
539 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe FW returned for this recipe chain */
541 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
542 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
543 struct ice_recp_grp_entry *rg_entry;
544 u8 prof_id, idx, prot = 0;
548 rg_entry = (struct ice_recp_grp_entry *)
549 ice_malloc(hw, sizeof(*rg_entry));
551 status = ICE_ERR_NO_MEMORY;
555 idx = root_bufs.recipe_indx;
556 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
558 /* Mark all result indices in this chain */
559 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
560 ice_set_bit(root_bufs.content.result_indx &
561 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
563 /* get the first profile that is associated with rid */
564 prof_id = ice_find_first_bit(recipe_to_profile[idx],
565 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped: index i + 1 below */
566 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
567 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
569 rg_entry->fv_idx[i] = lkup_indx;
570 rg_entry->fv_mask[i] =
571 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
573 /* If the recipe is a chained recipe then all its
574 * child recipe's result will have a result index.
575 * To fill fv_words we should not use those result
576 * index, we only need the protocol ids and offsets.
577 * We will skip all the fv_idx which stores result
578 * index in them. We also need to skip any fv_idx which
579 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
580 * valid offset value.
582 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
583 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
584 rg_entry->fv_idx[i] == 0)
587 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
588 rg_entry->fv_idx[i], &prot, &off);
589 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
590 lkup_exts->fv_words[fv_word_idx].off = off;
593 /* populate rg_list with the data from the child entry of this
596 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
598 /* Propagate some data to the recipe database */
599 recps[idx].is_root = is_root;
600 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
601 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
602 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
603 recps[idx].chain_idx = root_bufs.content.result_indx &
604 ~ICE_AQ_RECIPE_RESULT_EN;
605 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
607 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
613 /* Only do the following for root recipes entries */
614 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
615 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
616 recps[idx].root_rid = root_bufs.content.rid &
617 ~ICE_AQ_RECIPE_ID_IS_ROOT;
618 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
621 /* Complete initialization of the root recipe entry */
622 lkup_exts->n_val_words = fv_word_idx;
623 recps[rid].big_recp = (num_recps > 1);
624 recps[rid].n_grp_count = num_recps;
/* Keep a private copy of the raw FW buffers for later reference */
625 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
626 ice_calloc(hw, recps[rid].n_grp_count,
627 sizeof(struct ice_aqc_recipe_data_elem));
628 if (!recps[rid].root_buf)
631 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
632 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
634 /* Copy result indexes */
635 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
636 ICE_NONDMA_TO_NONDMA);
637 recps[rid].recp_created = true;
/**
645 * ice_get_recp_to_prof_map - updates recipe to profile mapping
646 * @hw: pointer to hardware structure
648 * This function is used to populate recipe_to_profile matrix where index to
649 * this array is the recipe ID and the element is the mapping of which profiles
650 * is this recipe mapped to.
 */
653 ice_get_recp_to_prof_map(struct ice_hw *hw)
655 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile and build both direction maps */
658 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
661 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
662 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* on AQ failure, leave this profile's map cleared and move on */
663 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
665 ice_memcpy(profile_to_recipe[i], r_bitmap,
666 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
/* mirror into the inverse (recipe -> profile) map */
667 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
668 if (ice_is_bit_set(r_bitmap, j))
669 ice_set_bit(i, recipe_to_profile[j]);
/**
674 * ice_init_def_sw_recp - initialize the recipe book keeping tables
675 * @hw: pointer to the HW struct
677 * Allocate memory for the entire recipe table and initialize the structures/
678 * entries corresponding to basic recipes.
 */
680 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
682 struct ice_sw_recipe *recps;
685 recps = (struct ice_sw_recipe *)
686 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
688 return ICE_ERR_NO_MEMORY;
/* every recipe starts as its own root with empty rule lists */
690 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
691 recps[i].root_rid = i;
692 INIT_LIST_HEAD(&recps[i].filt_rules);
693 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
694 INIT_LIST_HEAD(&recps[i].rg_list);
695 ice_init_lock(&recps[i].filt_rule_lock);
/* ownership of the allocation transfers to hw->switch_info */
698 hw->switch_info->recp_list = recps;
/**
704 * ice_aq_get_sw_cfg - get switch configuration
705 * @hw: pointer to the hardware structure
706 * @buf: pointer to the result buffer
707 * @buf_size: length of the buffer available for response
708 * @req_desc: pointer to requested descriptor
709 * @num_elems: pointer to number of elements
710 * @cd: pointer to command details structure or NULL
712 * Get switch configuration (0x0200) to be placed in 'buff'.
713 * This admin command returns information such as initial VSI/port number
714 * and switch ID it belongs to.
716 * NOTE: *req_desc is both an input/output parameter.
717 * The caller of this function first calls this function with *request_desc set
718 * to 0. If the response from f/w has *req_desc set to 0, all the switch
719 * configuration information has been returned; if non-zero (meaning not all
720 * the information was returned), the caller should call this function again
721 * with *req_desc set to the previous value returned by f/w to get the
722 * next block of switch configuration information.
724 * *num_elems is output only parameter. This reflects the number of elements
725 * in response buffer. The caller of this function to use *num_elems while
726 * parsing the response buffer.
 */
728 static enum ice_status
729 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
730 u16 buf_size, u16 *req_desc, u16 *num_elems,
731 struct ice_sq_cd *cd)
733 struct ice_aqc_get_sw_cfg *cmd;
734 enum ice_status status;
735 struct ice_aq_desc desc;
737 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
738 cmd = &desc.params.get_sw_conf;
739 cmd->element = CPU_TO_LE16(*req_desc);
741 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* echo back FW's continuation cookie and element count to the caller */
743 *req_desc = LE16_TO_CPU(cmd->element);
744 *num_elems = LE16_TO_CPU(cmd->num_elems);
/**
751 * ice_alloc_sw - allocate resources specific to switch
752 * @hw: pointer to the HW struct
753 * @ena_stats: true to turn on VEB stats
754 * @shared_res: true for shared resource, false for dedicated resource
755 * @sw_id: switch ID returned
756 * @counter_id: VEB counter ID returned
758 * allocates switch resources (SWID and VEB counter) (0x0208)
 */
761 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
764 struct ice_aqc_alloc_free_res_elem *sw_buf;
765 struct ice_aqc_res_elem *sw_ele;
766 enum ice_status status;
769 buf_len = sizeof(*sw_buf);
770 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
771 ice_malloc(hw, buf_len);
773 return ICE_ERR_NO_MEMORY;
775 /* Prepare buffer for switch ID.
776 * The number of resource entries in buffer is passed as 1 since only a
777 * single switch/VEB instance is allocated, and hence a single sw_id
780 sw_buf->num_elems = CPU_TO_LE16(1);
782 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
783 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
784 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
786 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
787 ice_aqc_opc_alloc_res, NULL);
790 goto ice_alloc_sw_exit;
792 sw_ele = &sw_buf->elem[0];
793 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* NOTE(review): the "if (ena_stats)" guard that appears to scope this
 * VEB-counter allocation is elided in this extract — confirm against
 * the complete source.
 */
796 /* Prepare buffer for VEB Counter */
797 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
798 struct ice_aqc_alloc_free_res_elem *counter_buf;
799 struct ice_aqc_res_elem *counter_ele;
801 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
802 ice_malloc(hw, buf_len);
804 status = ICE_ERR_NO_MEMORY;
805 goto ice_alloc_sw_exit;
808 /* The number of resource entries in buffer is passed as 1 since
809 * only a single switch/VEB instance is allocated, and hence a
810 * single VEB counter is requested.
812 counter_buf->num_elems = CPU_TO_LE16(1);
813 counter_buf->res_type =
814 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
815 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
816 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* free the counter buffer on the AQ failure path before exiting */
820 ice_free(hw, counter_buf);
821 goto ice_alloc_sw_exit;
823 counter_ele = &counter_buf->elem[0];
824 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
825 ice_free(hw, counter_buf);
/* common exit: release the switch-ID request buffer */
829 ice_free(hw, sw_buf);
/**
834 * ice_free_sw - free resources specific to switch
835 * @hw: pointer to the HW struct
836 * @sw_id: switch ID returned
837 * @counter_id: VEB counter ID returned
839 * free switch resources (SWID and VEB counter) (0x0209)
841 * NOTE: This function frees multiple resources. It continues
842 * releasing other resources even after it encounters error.
843 * The error code returned is the last error it encountered.
 */
845 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
847 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
848 enum ice_status status, ret_status;
851 buf_len = sizeof(*sw_buf);
852 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
853 ice_malloc(hw, buf_len);
855 return ICE_ERR_NO_MEMORY;
857 /* Prepare buffer to free for switch ID res.
858 * The number of resource entries in buffer is passed as 1 since only a
859 * single switch/VEB instance is freed, and hence a single sw_id
862 sw_buf->num_elems = CPU_TO_LE16(1);
863 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
864 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* remember the SWID-free status; it may be overridden by the
 * counter-free status below per the "last error wins" contract
 */
866 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
867 ice_aqc_opc_free_res, NULL);
870 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
872 /* Prepare buffer to free for VEB Counter resource */
873 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
874 ice_malloc(hw, buf_len);
876 ice_free(hw, sw_buf);
877 return ICE_ERR_NO_MEMORY;
880 /* The number of resource entries in buffer is passed as 1 since only a
881 * single switch/VEB instance is freed, and hence a single VEB counter
884 counter_buf->num_elems = CPU_TO_LE16(1);
885 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
886 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
888 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
889 ice_aqc_opc_free_res, NULL);
891 ice_debug(hw, ICE_DBG_SW,
892 "VEB counter resource could not be freed\n");
896 ice_free(hw, counter_buf);
897 ice_free(hw, sw_buf);
/**
 * ice_aq_add_vsi - add a VSI context to hardware
903 * @hw: pointer to the HW struct
904 * @vsi_ctx: pointer to a VSI context struct
905 * @cd: pointer to command details structure or NULL
907 * Add a VSI context to the hardware (0x0210)
 */
910 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
911 struct ice_sq_cd *cd)
913 struct ice_aqc_add_update_free_vsi_resp *res;
914 struct ice_aqc_add_get_update_free_vsi *cmd;
915 struct ice_aq_desc desc;
916 enum ice_status status;
918 cmd = &desc.params.vsi_cmd;
919 res = &desc.params.add_update_free_vsi_res;
921 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* when the caller supplies a specific VSI number, pass it to FW */
923 if (!vsi_ctx->alloc_from_pool)
924 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
925 ICE_AQ_VSI_IS_VALID);
927 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: the buffer carries data for FW to read */
929 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
931 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
932 sizeof(vsi_ctx->info), cd);
/* on success, capture the FW-assigned VSI number and pool counters */
935 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
936 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
937 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/**
 * ice_aq_free_vsi - free a VSI context in hardware
945 * @hw: pointer to the HW struct
946 * @vsi_ctx: pointer to a VSI context struct
947 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
948 * @cd: pointer to command details structure or NULL
950 * Free VSI context info from hardware (0x0213)
 */
953 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
954 bool keep_vsi_alloc, struct ice_sq_cd *cd)
956 struct ice_aqc_add_update_free_vsi_resp *resp;
957 struct ice_aqc_add_get_update_free_vsi *cmd;
958 struct ice_aq_desc desc;
959 enum ice_status status;
961 cmd = &desc.params.vsi_cmd;
962 resp = &desc.params.add_update_free_vsi_res;
964 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
966 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the "if (keep_vsi_alloc)" guard for this flag appears
 * to be elided in this extract — confirm against the complete source.
 */
968 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
970 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* on success, update the caller's view of the VSI pool counters */
972 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
973 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_update_vsi - update a VSI context in hardware
981 * @hw: pointer to the HW struct
982 * @vsi_ctx: pointer to a VSI context struct
983 * @cd: pointer to command details structure or NULL
985 * Update VSI context in the hardware (0x0211)
 */
988 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
989 struct ice_sq_cd *cd)
991 struct ice_aqc_add_update_free_vsi_resp *resp;
992 struct ice_aqc_add_get_update_free_vsi *cmd;
993 struct ice_aq_desc desc;
994 enum ice_status status;
996 cmd = &desc.params.vsi_cmd;
997 resp = &desc.params.add_update_free_vsi_res;
999 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1001 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the buffer carries data for FW to read */
1003 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1005 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1006 sizeof(vsi_ctx->info), cd);
/* on success, update the caller's view of the VSI pool counters */
1009 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1010 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
1017 * ice_is_vsi_valid - check whether the VSI is valid or not
1018 * @hw: pointer to the HW struct
1019 * @vsi_handle: VSI handle
1021 * check whether the VSI is valid or not
 */
1023 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* valid = handle in range AND a context has been saved for it */
1025 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
/**
1029 * ice_get_hw_vsi_num - return the HW VSI number
1030 * @hw: pointer to the HW struct
1031 * @vsi_handle: VSI handle
1033 * return the HW VSI number
1034 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
1036 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* no bounds/NULL check here by design — caller must validate first */
1038 return hw->vsi_ctx[vsi_handle]->vsi_num;
/**
1042 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1043 * @hw: pointer to the HW struct
1044 * @vsi_handle: VSI handle
1046 * return the VSI context entry for a given VSI handle
 */
1048 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* returns NULL for an out-of-range handle (and for unsaved handles,
 * since the table slot is then NULL)
 */
1050 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
/**
1054 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1055 * @hw: pointer to the HW struct
1056 * @vsi_handle: VSI handle
1057 * @vsi: VSI context pointer
1059 * save the VSI context entry for a given VSI handle
 */
1062 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* stores the pointer only; the context memory is owned by the caller */
1064 hw->vsi_ctx[vsi_handle] = vsi;
/**
1068 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1069 * @hw: pointer to the HW struct
1070 * @vsi_handle: VSI handle
 */
1072 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1074 struct ice_vsi_ctx *vsi;
1077 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* free the per-TC LAN queue context arrays and NULL the slots */
1080 ice_for_each_traffic_class(i) {
1081 if (vsi->lan_q_ctx[i]) {
1082 ice_free(hw, vsi->lan_q_ctx[i]);
1083 vsi->lan_q_ctx[i] = NULL;
/**
1089 * ice_clear_vsi_ctx - clear the VSI context entry
1090 * @hw: pointer to the HW struct
1091 * @vsi_handle: VSI handle
1093 * clear the VSI context entry
 */
1095 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1097 struct ice_vsi_ctx *vsi;
1099 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* release queue contexts first, then drop the handle's table entry */
1101 ice_clear_vsi_q_ctx(hw, vsi_handle);
1103 hw->vsi_ctx[vsi_handle] = NULL;
/**
1108 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1109 * @hw: pointer to the HW struct
 */
1111 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* walk the full handle space; clearing an unused handle is a no-op */
1115 for (i = 0; i < ICE_MAX_VSI; i++)
1116 ice_clear_vsi_ctx(hw, i);
/**
1120 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1121 * @hw: pointer to the HW struct
1122 * @vsi_handle: unique VSI handle provided by drivers
1123 * @vsi_ctx: pointer to a VSI context struct
1124 * @cd: pointer to command details structure or NULL
1126 * Add a VSI context to the hardware also add it into the VSI handle list.
1127 * If this function gets called after reset for existing VSIs then update
1128 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
1131 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1132 struct ice_sq_cd *cd)
1134 struct ice_vsi_ctx *tmp_vsi_ctx;
1135 enum ice_status status;
1137 if (vsi_handle >= ICE_MAX_VSI)
1138 return ICE_ERR_PARAM;
1139 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1142 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1144 /* Create a new VSI context */
1145 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1146 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* allocation failed: roll back the HW-side add to avoid a leak */
1148 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1149 return ICE_ERR_NO_MEMORY;
1151 *tmp_vsi_ctx = *vsi_ctx;
1153 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1155 /* update with new HW VSI num */
1156 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
/**
1163 * ice_free_vsi- free VSI context from hardware and VSI handle list
1164 * @hw: pointer to the HW struct
1165 * @vsi_handle: unique VSI handle
1166 * @vsi_ctx: pointer to a VSI context struct
1167 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1168 * @cd: pointer to command details structure or NULL
1170 * Free VSI context info from hardware as well as from VSI handle list
 */
1173 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1174 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1176 enum ice_status status;
1178 if (!ice_is_vsi_valid(hw, vsi_handle))
1179 return ICE_ERR_PARAM;
/* translate the driver handle to the HW VSI number before the AQ call */
1180 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1181 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1183 ice_clear_vsi_ctx(hw, vsi_handle);
/**
 * ice_update_vsi - update a VSI context in hardware by VSI handle
1189 * @hw: pointer to the HW struct
1190 * @vsi_handle: unique VSI handle
1191 * @vsi_ctx: pointer to a VSI context struct
1192 * @cd: pointer to command details structure or NULL
1194 * Update VSI context in the hardware
 */
1197 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1198 struct ice_sq_cd *cd)
1200 if (!ice_is_vsi_valid(hw, vsi_handle))
1201 return ICE_ERR_PARAM;
/* translate the driver handle to the HW VSI number before the AQ call */
1202 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1203 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1207 * ice_aq_get_vsi_params
1208 * @hw: pointer to the HW struct
1209 * @vsi_ctx: pointer to a VSI context struct
1210 * @cd: pointer to command details structure or NULL
1212 * Get VSI context info from hardware (0x0212)
1215 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1216 struct ice_sq_cd *cd)
1218 struct ice_aqc_add_get_update_free_vsi *cmd;
1219 struct ice_aqc_get_vsi_resp *resp;
1220 struct ice_aq_desc desc;
1221 enum ice_status status;
1223 cmd = &desc.params.vsi_cmd;
1224 resp = &desc.params.get_vsi_resp;
1226 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1228 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1230 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1231 sizeof(vsi_ctx->info), cd);
1233 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1235 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1236 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1243 * ice_aq_add_update_mir_rule - add/update a mirror rule
1244 * @hw: pointer to the HW struct
1245 * @rule_type: Rule Type
1246 * @dest_vsi: VSI number to which packets will be mirrored
1247 * @count: length of the list
1248 * @mr_buf: buffer for list of mirrored VSI numbers
1249 * @cd: pointer to command details structure or NULL
1252 * Add/Update Mirror Rule (0x260).
1255 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1256 u16 count, struct ice_mir_rule_buf *mr_buf,
1257 struct ice_sq_cd *cd, u16 *rule_id)
1259 struct ice_aqc_add_update_mir_rule *cmd;
1260 struct ice_aq_desc desc;
1261 enum ice_status status;
1262 __le16 *mr_list = NULL;
1265 switch (rule_type) {
1266 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1267 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1268 /* Make sure count and mr_buf are set for these rule_types */
1269 if (!(count && mr_buf))
1270 return ICE_ERR_PARAM;
1272 buf_size = count * sizeof(__le16);
1273 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1275 return ICE_ERR_NO_MEMORY;
1277 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1278 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1279 /* Make sure count and mr_buf are not set for these
1282 if (count || mr_buf)
1283 return ICE_ERR_PARAM;
1286 ice_debug(hw, ICE_DBG_SW,
1287 "Error due to unsupported rule_type %u\n", rule_type);
1288 return ICE_ERR_OUT_OF_RANGE;
1291 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1293 /* Pre-process 'mr_buf' items for add/update of virtual port
1294 * ingress/egress mirroring (but not physical port ingress/egress
1300 for (i = 0; i < count; i++) {
1303 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1305 /* Validate specified VSI number, make sure it is less
1306 * than ICE_MAX_VSI, if not return with error.
1308 if (id >= ICE_MAX_VSI) {
1309 ice_debug(hw, ICE_DBG_SW,
1310 "Error VSI index (%u) out-of-range\n",
1312 ice_free(hw, mr_list);
1313 return ICE_ERR_OUT_OF_RANGE;
1316 /* add VSI to mirror rule */
1319 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1320 else /* remove VSI from mirror rule */
1321 mr_list[i] = CPU_TO_LE16(id);
1325 cmd = &desc.params.add_update_rule;
1326 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1327 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1328 ICE_AQC_RULE_ID_VALID_M);
1329 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1330 cmd->num_entries = CPU_TO_LE16(count);
1331 cmd->dest = CPU_TO_LE16(dest_vsi);
1333 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1335 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1337 ice_free(hw, mr_list);
1343 * ice_aq_delete_mir_rule - delete a mirror rule
1344 * @hw: pointer to the HW struct
1345 * @rule_id: Mirror rule ID (to be deleted)
1346 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1347 * otherwise it is returned to the shared pool
1348 * @cd: pointer to command details structure or NULL
1350 * Delete Mirror Rule (0x261).
1353 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1354 struct ice_sq_cd *cd)
1356 struct ice_aqc_delete_mir_rule *cmd;
1357 struct ice_aq_desc desc;
1359 /* rule_id should be in the range 0...63 */
1360 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1361 return ICE_ERR_OUT_OF_RANGE;
1363 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1365 cmd = &desc.params.del_rule;
1366 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1367 cmd->rule_id = CPU_TO_LE16(rule_id);
1370 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1372 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1376 * ice_aq_alloc_free_vsi_list
1377 * @hw: pointer to the HW struct
1378 * @vsi_list_id: VSI list ID returned or used for lookup
1379 * @lkup_type: switch rule filter lookup type
1380 * @opc: switch rules population command type - pass in the command opcode
1382 * allocates or free a VSI list resource
1384 static enum ice_status
1385 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1386 enum ice_sw_lkup_type lkup_type,
1387 enum ice_adminq_opc opc)
1389 struct ice_aqc_alloc_free_res_elem *sw_buf;
1390 struct ice_aqc_res_elem *vsi_ele;
1391 enum ice_status status;
1394 buf_len = sizeof(*sw_buf);
1395 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1396 ice_malloc(hw, buf_len);
1398 return ICE_ERR_NO_MEMORY;
1399 sw_buf->num_elems = CPU_TO_LE16(1);
1401 if (lkup_type == ICE_SW_LKUP_MAC ||
1402 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1403 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1404 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1405 lkup_type == ICE_SW_LKUP_PROMISC ||
1406 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1407 lkup_type == ICE_SW_LKUP_LAST) {
1408 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1409 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1411 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1413 status = ICE_ERR_PARAM;
1414 goto ice_aq_alloc_free_vsi_list_exit;
1417 if (opc == ice_aqc_opc_free_res)
1418 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1420 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1422 goto ice_aq_alloc_free_vsi_list_exit;
1424 if (opc == ice_aqc_opc_alloc_res) {
1425 vsi_ele = &sw_buf->elem[0];
1426 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1429 ice_aq_alloc_free_vsi_list_exit:
1430 ice_free(hw, sw_buf);
1435 * ice_aq_set_storm_ctrl - Sets storm control configuration
1436 * @hw: pointer to the HW struct
1437 * @bcast_thresh: represents the upper threshold for broadcast storm control
1438 * @mcast_thresh: represents the upper threshold for multicast storm control
1439 * @ctl_bitmask: storm control control knobs
1441 * Sets the storm control configuration (0x0280)
1444 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1447 struct ice_aqc_storm_cfg *cmd;
1448 struct ice_aq_desc desc;
1450 cmd = &desc.params.storm_conf;
1452 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1454 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1455 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1456 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1458 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1462 * ice_aq_get_storm_ctrl - gets storm control configuration
1463 * @hw: pointer to the HW struct
1464 * @bcast_thresh: represents the upper threshold for broadcast storm control
1465 * @mcast_thresh: represents the upper threshold for multicast storm control
1466 * @ctl_bitmask: storm control control knobs
1468 * Gets the storm control configuration (0x0281)
1471 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1474 enum ice_status status;
1475 struct ice_aq_desc desc;
1477 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1479 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1481 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1484 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1487 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1490 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1497 * ice_aq_sw_rules - add/update/remove switch rules
1498 * @hw: pointer to the HW struct
1499 * @rule_list: pointer to switch rule population list
1500 * @rule_list_sz: total size of the rule list in bytes
1501 * @num_rules: number of switch rules in the rule_list
1502 * @opc: switch rules population command type - pass in the command opcode
1503 * @cd: pointer to command details structure or NULL
1505 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1507 static enum ice_status
1508 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1509 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1511 struct ice_aq_desc desc;
1513 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1515 if (opc != ice_aqc_opc_add_sw_rules &&
1516 opc != ice_aqc_opc_update_sw_rules &&
1517 opc != ice_aqc_opc_remove_sw_rules)
1518 return ICE_ERR_PARAM;
1520 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1522 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1523 desc.params.sw_rules.num_rules_fltr_entry_index =
1524 CPU_TO_LE16(num_rules);
1525 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1529 * ice_aq_add_recipe - add switch recipe
1530 * @hw: pointer to the HW struct
1531 * @s_recipe_list: pointer to switch rule population list
1532 * @num_recipes: number of switch recipes in the list
1533 * @cd: pointer to command details structure or NULL
1538 ice_aq_add_recipe(struct ice_hw *hw,
1539 struct ice_aqc_recipe_data_elem *s_recipe_list,
1540 u16 num_recipes, struct ice_sq_cd *cd)
1542 struct ice_aqc_add_get_recipe *cmd;
1543 struct ice_aq_desc desc;
1546 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1547 cmd = &desc.params.add_get_recipe;
1548 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1550 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1551 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1553 buf_size = num_recipes * sizeof(*s_recipe_list);
1555 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1559 * ice_aq_get_recipe - get switch recipe
1560 * @hw: pointer to the HW struct
1561 * @s_recipe_list: pointer to switch rule population list
1562 * @num_recipes: pointer to the number of recipes (input and output)
1563 * @recipe_root: root recipe number of recipe(s) to retrieve
1564 * @cd: pointer to command details structure or NULL
1568 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1569 * On output, *num_recipes will equal the number of entries returned in
1572 * The caller must supply enough space in s_recipe_list to hold all possible
1573 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1576 ice_aq_get_recipe(struct ice_hw *hw,
1577 struct ice_aqc_recipe_data_elem *s_recipe_list,
1578 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1580 struct ice_aqc_add_get_recipe *cmd;
1581 struct ice_aq_desc desc;
1582 enum ice_status status;
1585 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1586 return ICE_ERR_PARAM;
1588 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1589 cmd = &desc.params.add_get_recipe;
1590 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1592 cmd->return_index = CPU_TO_LE16(recipe_root);
1593 cmd->num_sub_recipes = 0;
1595 buf_size = *num_recipes * sizeof(*s_recipe_list);
1597 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1598 /* cppcheck-suppress constArgument */
1599 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1605 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1606 * @hw: pointer to the HW struct
1607 * @profile_id: package profile ID to associate the recipe with
1608 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1609 * @cd: pointer to command details structure or NULL
1610 * Recipe to profile association (0x0291)
1613 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1614 struct ice_sq_cd *cd)
1616 struct ice_aqc_recipe_to_profile *cmd;
1617 struct ice_aq_desc desc;
1619 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1620 cmd = &desc.params.recipe_to_profile;
1621 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1622 cmd->profile_id = CPU_TO_LE16(profile_id);
1623 /* Set the recipe ID bit in the bitmask to let the device know which
1624 * profile we are associating the recipe to
1626 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1627 ICE_NONDMA_TO_NONDMA);
1629 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1633 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1634 * @hw: pointer to the HW struct
1635 * @profile_id: package profile ID to associate the recipe with
1636 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1637 * @cd: pointer to command details structure or NULL
1638 * Associate profile ID with given recipe (0x0293)
1641 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1642 struct ice_sq_cd *cd)
1644 struct ice_aqc_recipe_to_profile *cmd;
1645 struct ice_aq_desc desc;
1646 enum ice_status status;
1648 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1649 cmd = &desc.params.recipe_to_profile;
1650 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1651 cmd->profile_id = CPU_TO_LE16(profile_id);
1653 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1655 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1656 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1662 * ice_alloc_recipe - add recipe resource
1663 * @hw: pointer to the hardware structure
1664 * @rid: recipe ID returned as response to AQ call
1666 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1668 struct ice_aqc_alloc_free_res_elem *sw_buf;
1669 enum ice_status status;
1672 buf_len = sizeof(*sw_buf);
1673 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1675 return ICE_ERR_NO_MEMORY;
1677 sw_buf->num_elems = CPU_TO_LE16(1);
1678 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1679 ICE_AQC_RES_TYPE_S) |
1680 ICE_AQC_RES_TYPE_FLAG_SHARED);
1681 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1682 ice_aqc_opc_alloc_res, NULL);
1684 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1685 ice_free(hw, sw_buf);
1690 /* ice_init_port_info - Initialize port_info with switch configuration data
1691 * @pi: pointer to port_info
1692 * @vsi_port_num: VSI number or port number
1693 * @type: Type of switch element (port or VSI)
1694 * @swid: switch ID of the switch the element is attached to
1695 * @pf_vf_num: PF or VF number
1696 * @is_vf: true if the element is a VF, false otherwise
1699 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1700 u16 swid, u16 pf_vf_num, bool is_vf)
1703 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1704 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1706 pi->pf_vf_num = pf_vf_num;
1708 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1709 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1712 ice_debug(pi->hw, ICE_DBG_SW,
1713 "incorrect VSI/port type received\n");
1718 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1719 * @hw: pointer to the hardware structure
1721 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1723 struct ice_aqc_get_sw_cfg_resp *rbuf;
1724 enum ice_status status;
1725 u16 num_total_ports;
1731 num_total_ports = 1;
1733 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1734 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1737 return ICE_ERR_NO_MEMORY;
1739 /* Multiple calls to ice_aq_get_sw_cfg may be required
1740 * to get all the switch configuration information. The need
1741 * for additional calls is indicated by ice_aq_get_sw_cfg
1742 * writing a non-zero value in req_desc
1745 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1746 &req_desc, &num_elems, NULL);
1751 for (i = 0; i < num_elems; i++) {
1752 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1753 u16 pf_vf_num, swid, vsi_port_num;
1757 ele = rbuf[i].elements;
1758 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1759 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1761 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1762 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1764 swid = LE16_TO_CPU(ele->swid);
1766 if (LE16_TO_CPU(ele->pf_vf_num) &
1767 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1770 type = LE16_TO_CPU(ele->vsi_port_num) >>
1771 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1774 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1775 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1776 if (j == num_total_ports) {
1777 ice_debug(hw, ICE_DBG_SW,
1778 "more ports than expected\n");
1779 status = ICE_ERR_CFG;
1782 ice_init_port_info(hw->port_info,
1783 vsi_port_num, type, swid,
1791 } while (req_desc && !status);
1794 ice_free(hw, (void *)rbuf);
1799 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1800 * @hw: pointer to the hardware structure
1801 * @fi: filter info structure to fill/update
1803 * This helper function populates the lb_en and lan_en elements of the provided
1804 * ice_fltr_info struct using the switch's type and characteristics of the
1805 * switch rule being configured.
1807 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1811 if ((fi->flag & ICE_FLTR_TX) &&
1812 (fi->fltr_act == ICE_FWD_TO_VSI ||
1813 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1814 fi->fltr_act == ICE_FWD_TO_Q ||
1815 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1816 /* Setting LB for prune actions will result in replicated
1817 * packets to the internal switch that will be dropped.
1819 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1822 /* Set lan_en to TRUE if
1823 * 1. The switch is a VEB AND
1825 * 2.1 The lookup is a directional lookup like ethertype,
1826 * promiscuous, ethertype-MAC, promiscuous-VLAN
1827 * and default-port OR
1828 * 2.2 The lookup is VLAN, OR
1829 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1830 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1834 * The switch is a VEPA.
1836 * In all other cases, the LAN enable has to be set to false.
1839 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1840 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1841 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1842 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1843 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1844 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1845 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1846 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1847 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1848 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1857 * ice_fill_sw_rule - Helper function to fill switch rule structure
1858 * @hw: pointer to the hardware structure
1859 * @f_info: entry containing packet forwarding information
1860 * @s_rule: switch rule structure to be filled in based on mac_entry
1861 * @opc: switch rules population command type - pass in the command opcode
1864 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1865 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1867 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1875 if (opc == ice_aqc_opc_remove_sw_rules) {
1876 s_rule->pdata.lkup_tx_rx.act = 0;
1877 s_rule->pdata.lkup_tx_rx.index =
1878 CPU_TO_LE16(f_info->fltr_rule_id);
1879 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1883 eth_hdr_sz = sizeof(dummy_eth_header);
1884 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1886 /* initialize the ether header with a dummy header */
1887 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1888 ice_fill_sw_info(hw, f_info);
1890 switch (f_info->fltr_act) {
1891 case ICE_FWD_TO_VSI:
1892 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1893 ICE_SINGLE_ACT_VSI_ID_M;
1894 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1895 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1896 ICE_SINGLE_ACT_VALID_BIT;
1898 case ICE_FWD_TO_VSI_LIST:
1899 act |= ICE_SINGLE_ACT_VSI_LIST;
1900 act |= (f_info->fwd_id.vsi_list_id <<
1901 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1902 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1903 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1904 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1905 ICE_SINGLE_ACT_VALID_BIT;
1908 act |= ICE_SINGLE_ACT_TO_Q;
1909 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1910 ICE_SINGLE_ACT_Q_INDEX_M;
1912 case ICE_DROP_PACKET:
1913 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1914 ICE_SINGLE_ACT_VALID_BIT;
1916 case ICE_FWD_TO_QGRP:
1917 q_rgn = f_info->qgrp_size > 0 ?
1918 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1919 act |= ICE_SINGLE_ACT_TO_Q;
1920 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1921 ICE_SINGLE_ACT_Q_INDEX_M;
1922 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1923 ICE_SINGLE_ACT_Q_REGION_M;
1930 act |= ICE_SINGLE_ACT_LB_ENABLE;
1932 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1934 switch (f_info->lkup_type) {
1935 case ICE_SW_LKUP_MAC:
1936 daddr = f_info->l_data.mac.mac_addr;
1938 case ICE_SW_LKUP_VLAN:
1939 vlan_id = f_info->l_data.vlan.vlan_id;
1940 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1941 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1942 act |= ICE_SINGLE_ACT_PRUNE;
1943 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1946 case ICE_SW_LKUP_ETHERTYPE_MAC:
1947 daddr = f_info->l_data.ethertype_mac.mac_addr;
1949 case ICE_SW_LKUP_ETHERTYPE:
1950 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1951 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1953 case ICE_SW_LKUP_MAC_VLAN:
1954 daddr = f_info->l_data.mac_vlan.mac_addr;
1955 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1957 case ICE_SW_LKUP_PROMISC_VLAN:
1958 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1960 case ICE_SW_LKUP_PROMISC:
1961 daddr = f_info->l_data.mac_vlan.mac_addr;
1967 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1968 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1969 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1971 /* Recipe set depending on lookup type */
1972 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1973 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1974 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1977 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1978 ICE_NONDMA_TO_NONDMA);
1980 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1981 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1982 *off = CPU_TO_BE16(vlan_id);
1985 /* Create the switch rule with the final dummy Ethernet header */
1986 if (opc != ice_aqc_opc_update_sw_rules)
1987 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1991 * ice_add_marker_act
1992 * @hw: pointer to the hardware structure
1993 * @m_ent: the management entry for which sw marker needs to be added
1994 * @sw_marker: sw marker to tag the Rx descriptor with
1995 * @l_id: large action resource ID
1997 * Create a large action to hold software marker and update the switch rule
1998 * entry pointed by m_ent with newly created large action
2000 static enum ice_status
2001 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2002 u16 sw_marker, u16 l_id)
2004 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2005 /* For software marker we need 3 large actions
2006 * 1. FWD action: FWD TO VSI or VSI LIST
2007 * 2. GENERIC VALUE action to hold the profile ID
2008 * 3. GENERIC VALUE action to hold the software marker ID
2010 const u16 num_lg_acts = 3;
2011 enum ice_status status;
2017 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2018 return ICE_ERR_PARAM;
2020 /* Create two back-to-back switch rules and submit them to the HW using
2021 * one memory buffer:
2025 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2026 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2027 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2029 return ICE_ERR_NO_MEMORY;
2031 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2033 /* Fill in the first switch rule i.e. large action */
2034 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2035 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2036 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2038 /* First action VSI forwarding or VSI list forwarding depending on how
2041 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2042 m_ent->fltr_info.fwd_id.hw_vsi_id;
2044 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2045 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2046 ICE_LG_ACT_VSI_LIST_ID_M;
2047 if (m_ent->vsi_count > 1)
2048 act |= ICE_LG_ACT_VSI_LIST;
2049 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2051 /* Second action descriptor type */
2052 act = ICE_LG_ACT_GENERIC;
2054 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2055 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2057 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2058 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2060 /* Third action Marker value */
2061 act |= ICE_LG_ACT_GENERIC;
2062 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2063 ICE_LG_ACT_GENERIC_VALUE_M;
2065 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2067 /* call the fill switch rule to fill the lookup Tx Rx structure */
2068 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2069 ice_aqc_opc_update_sw_rules);
2071 /* Update the action to point to the large action ID */
2072 rx_tx->pdata.lkup_tx_rx.act =
2073 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2074 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2075 ICE_SINGLE_ACT_PTR_VAL_M));
2077 /* Use the filter rule ID of the previously created rule with single
2078 * act. Once the update happens, hardware will treat this as large
2081 rx_tx->pdata.lkup_tx_rx.index =
2082 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2084 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2085 ice_aqc_opc_update_sw_rules, NULL);
2087 m_ent->lg_act_idx = l_id;
2088 m_ent->sw_marker_id = sw_marker;
2091 ice_free(hw, lg_act);
2096 * ice_add_counter_act - add/update filter rule with counter action
2097 * @hw: pointer to the hardware structure
2098 * @m_ent: the management entry for which counter needs to be added
2099 * @counter_id: VLAN counter ID returned as part of allocate resource
2100 * @l_id: large action resource ID
2102 static enum ice_status
2103 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2104 u16 counter_id, u16 l_id)
2106 struct ice_aqc_sw_rules_elem *lg_act;
2107 struct ice_aqc_sw_rules_elem *rx_tx;
2108 enum ice_status status;
2109 /* 2 actions will be added while adding a large action counter */
2110 const int num_acts = 2;
2117 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2118 return ICE_ERR_PARAM;
2120 /* Create two back-to-back switch rules and submit them to the HW using
2121 * one memory buffer:
2125 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2126 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2127 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2130 return ICE_ERR_NO_MEMORY;
2132 rx_tx = (struct ice_aqc_sw_rules_elem *)
2133 ((u8 *)lg_act + lg_act_size);
2135 /* Fill in the first switch rule i.e. large action */
2136 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2137 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2138 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2140 /* First action VSI forwarding or VSI list forwarding depending on how
2143 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2144 m_ent->fltr_info.fwd_id.hw_vsi_id;
2146 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2147 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2148 ICE_LG_ACT_VSI_LIST_ID_M;
2149 if (m_ent->vsi_count > 1)
2150 act |= ICE_LG_ACT_VSI_LIST;
2151 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2153 /* Second action counter ID */
2154 act = ICE_LG_ACT_STAT_COUNT;
2155 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2156 ICE_LG_ACT_STAT_COUNT_M;
2157 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2159 /* call the fill switch rule to fill the lookup Tx Rx structure */
2160 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2161 ice_aqc_opc_update_sw_rules);
2163 act = ICE_SINGLE_ACT_PTR;
2164 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2165 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2167 /* Use the filter rule ID of the previously created rule with single
2168 * act. Once the update happens, hardware will treat this as large
2171 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2172 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2174 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2175 ice_aqc_opc_update_sw_rules, NULL);
2177 m_ent->lg_act_idx = l_id;
2178 m_ent->counter_index = counter_id;
2181 ice_free(hw, lg_act);
2186 * ice_create_vsi_list_map
2187 * @hw: pointer to the hardware structure
2188 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2189 * @num_vsi: number of VSI handles in the array
2190 * @vsi_list_id: VSI list ID generated as part of allocate resource
2192 * Helper function to create a new entry of VSI list ID to VSI mapping
2193 * using the given VSI list ID
2195 static struct ice_vsi_list_map_info *
2196 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2199 struct ice_switch_info *sw = hw->switch_info;
2200 struct ice_vsi_list_map_info *v_map;
2203 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2208 v_map->vsi_list_id = vsi_list_id;
2210 for (i = 0; i < num_vsi; i++)
2211 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2213 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2218 * ice_update_vsi_list_rule
2219 * @hw: pointer to the hardware structure
2220 * @vsi_handle_arr: array of VSI handles to form a VSI list
2221 * @num_vsi: number of VSI handles in the array
2222 * @vsi_list_id: VSI list ID generated as part of allocate resource
2223 * @remove: Boolean value to indicate if this is a remove action
2224 * @opc: switch rules population command type - pass in the command opcode
2225 * @lkup_type: lookup type of the filter
2227 * Call AQ command to add a new switch rule or update existing switch rule
2228 * using the given VSI list ID
2230 static enum ice_status
2231 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2232 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2233 enum ice_sw_lkup_type lkup_type)
2235 struct ice_aqc_sw_rules_elem *s_rule;
2236 enum ice_status status;
2242 return ICE_ERR_PARAM;
2244 if (lkup_type == ICE_SW_LKUP_MAC ||
2245 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2246 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2247 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2248 lkup_type == ICE_SW_LKUP_PROMISC ||
2249 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2250 lkup_type == ICE_SW_LKUP_LAST)
2251 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2252 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2253 else if (lkup_type == ICE_SW_LKUP_VLAN)
2254 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2255 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2257 return ICE_ERR_PARAM;
2259 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2260 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2262 return ICE_ERR_NO_MEMORY;
2263 for (i = 0; i < num_vsi; i++) {
2264 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2265 status = ICE_ERR_PARAM;
2268 /* AQ call requires hw_vsi_id(s) */
2269 s_rule->pdata.vsi_list.vsi[i] =
2270 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2273 s_rule->type = CPU_TO_LE16(type);
2274 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2275 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2277 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2280 ice_free(hw, s_rule);
2285 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2286 * @hw: pointer to the HW struct
2287 * @vsi_handle_arr: array of VSI handles to form a VSI list
2288 * @num_vsi: number of VSI handles in the array
2289 * @vsi_list_id: stores the ID of the VSI list to be created
2290 * @lkup_type: switch rule filter's lookup type
2292 static enum ice_status
2293 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2294 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2296 enum ice_status status;
/* Step 1: allocate a VSI list resource from FW; on success the new
 * list ID is written back through @vsi_list_id.
 */
2298 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2299 ice_aqc_opc_alloc_res);
2303 /* Update the newly created VSI list to include the specified VSIs */
2304 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2305 *vsi_list_id, false,
2306 ice_aqc_opc_add_sw_rules, lkup_type);
2310 * ice_create_pkt_fwd_rule
2311 * @hw: pointer to the hardware structure
2312 * @f_entry: entry containing packet forwarding information
2314 * Create switch rule with given filter information and add an entry
2315 * to the corresponding filter management list to track this switch rule
2318 static enum ice_status
2319 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2320 struct ice_fltr_list_entry *f_entry)
2322 struct ice_fltr_mgmt_list_entry *fm_entry;
2323 struct ice_aqc_sw_rules_elem *s_rule;
2324 enum ice_sw_lkup_type l_type;
2325 struct ice_sw_recipe *recp;
2326 enum ice_status status;
/* Rule buffer must be large enough to carry the dummy ethernet
 * header (see ICE_SW_RULE_RX_TX_ETH_HDR_SIZE / dummy_eth_header).
 */
2328 s_rule = (struct ice_aqc_sw_rules_elem *)
2329 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2331 return ICE_ERR_NO_MEMORY;
2332 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2333 ice_malloc(hw, sizeof(*fm_entry));
2335 status = ICE_ERR_NO_MEMORY;
2336 goto ice_create_pkt_fwd_rule_exit;
2339 fm_entry->fltr_info = f_entry->fltr_info;
2341 /* Initialize all the fields for the management entry */
2342 fm_entry->vsi_count = 1;
2343 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2344 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2345 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2347 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2348 ice_aqc_opc_add_sw_rules);
2350 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2351 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is not tracked; free it here. */
2353 ice_free(hw, fm_entry);
2354 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the rule index in the response buffer; propagate it to
 * both the caller's entry and the bookkeeping entry.
 */
2357 f_entry->fltr_info.fltr_rule_id =
2358 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2359 fm_entry->fltr_info.fltr_rule_id =
2360 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2362 /* The book keeping entries will get removed when base driver
2363 * calls remove filter AQ command
2365 l_type = fm_entry->fltr_info.lkup_type;
2366 recp = &hw->switch_info->recp_list[l_type];
2367 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2369 ice_create_pkt_fwd_rule_exit:
2370 ice_free(hw, s_rule);
2375 * ice_update_pkt_fwd_rule
2376 * @hw: pointer to the hardware structure
2377 * @f_info: filter information for switch rule
2379 * Call AQ command to update a previously created switch rule with a
2382 static enum ice_status
2383 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2385 struct ice_aqc_sw_rules_elem *s_rule;
2386 enum ice_status status;
2388 s_rule = (struct ice_aqc_sw_rules_elem *)
2389 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)
2391 return ICE_ERR_NO_MEMORY;
2393 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* The update opcode addresses an existing rule by its FW rule ID. */
2395 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2397 /* Update switch rule with new rule set to forward VSI list */
2398 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2399 ice_aqc_opc_update_sw_rules, NULL);
2401 ice_free(hw, s_rule);
2406 * ice_update_sw_rule_bridge_mode
2407 * @hw: pointer to the HW struct
2409 * Updates unicast switch filter rules based on VEB/VEPA mode
2411 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2413 struct ice_switch_info *sw = hw->switch_info;
2414 struct ice_fltr_mgmt_list_entry *fm_entry;
2415 enum ice_status status = ICE_SUCCESS;
2416 struct LIST_HEAD_TYPE *rule_head;
2417 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2419 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2420 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Hold the MAC recipe lock while walking and updating its rules. */
2422 ice_acquire_lock(rule_lock);
2423 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2425 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2426 u8 *addr = fi->l_data.mac.mac_addr;
2428 /* Update unicast Tx rules to reflect the selected
/* Only Tx rules with a unicast MAC and a forwarding action are
 * re-programmed; Rx/multicast rules are unaffected by bridge mode.
 */
2431 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2432 (fi->fltr_act == ICE_FWD_TO_VSI ||
2433 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2434 fi->fltr_act == ICE_FWD_TO_Q ||
2435 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2436 status = ice_update_pkt_fwd_rule(hw, fi);
2442 ice_release_lock(rule_lock);
2448 * ice_add_update_vsi_list
2449 * @hw: pointer to the hardware structure
2450 * @m_entry: pointer to current filter management list entry
2451 * @cur_fltr: filter information from the book keeping entry
2452 * @new_fltr: filter information with the new VSI to be added
2454 * Call AQ command to add or update previously created VSI list with new VSI.
2456 * Helper function to do book keeping associated with adding filter information
2457 * The algorithm to do the book keeping is described below :
2458 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2459 * if only one VSI has been added till now
2460 * Allocate a new VSI list and add two VSIs
2461 * to this list using switch rule command
2462 * Update the previously created switch rule with the
2463 * newly created VSI list ID
2464 * if a VSI list was previously created
2465 * Add the new VSI to the previously created VSI list set
2466 * using the update switch rule command
2468 static enum ice_status
2469 ice_add_update_vsi_list(struct ice_hw *hw,
2470 struct ice_fltr_mgmt_list_entry *m_entry,
2471 struct ice_fltr_info *cur_fltr,
2472 struct ice_fltr_info *new_fltr)
2474 enum ice_status status = ICE_SUCCESS;
2475 u16 vsi_list_id = 0;
/* Queue/queue-group destinations cannot share a rule via a VSI
 * list, so any such combination is not implemented.
 */
2477 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2478 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2479 return ICE_ERR_NOT_IMPL;
2481 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2482 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2483 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2484 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2485 return ICE_ERR_NOT_IMPL;
2487 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2488 /* Only one entry existed in the mapping and it was not already
2489 * a part of a VSI list. So, create a VSI list with the old and
2492 struct ice_fltr_info tmp_fltr;
2493 u16 vsi_handle_arr[2];
2495 /* A rule already exists with the new VSI being added */
2496 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2497 return ICE_ERR_ALREADY_EXISTS;
2499 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2500 vsi_handle_arr[1] = new_fltr->vsi_handle;
2501 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2503 new_fltr->lkup_type);
/* Re-point the existing single-VSI rule at the new VSI list.
 * tmp_fltr keeps the original FW rule ID so the update opcode
 * targets the rule already programmed in HW.
 */
2507 tmp_fltr = *new_fltr;
2508 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2509 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2510 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2511 /* Update the previous switch rule of "MAC forward to VSI" to
2512 * "MAC fwd to VSI list"
2514 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the bookkeeping filter info in sync with what HW now has. */
2518 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2519 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2520 m_entry->vsi_list_info =
2521 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2524 /* If this entry was large action then the large action needs
2525 * to be updated to point to FWD to VSI list
2527 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2529 ice_add_marker_act(hw, m_entry,
2530 m_entry->sw_marker_id,
2531 m_entry->lg_act_idx);
/* Else branch (entry already backed by a VSI list): just append the
 * new VSI to the existing list.
 */
2533 u16 vsi_handle = new_fltr->vsi_handle;
2534 enum ice_adminq_opc opcode;
2536 if (!m_entry->vsi_list_info)
2539 /* A rule already exists with the new VSI being added */
2540 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2543 /* Update the previously created VSI list set with
2544 * the new VSI ID passed in
2546 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2547 opcode = ice_aqc_opc_update_sw_rules;
2549 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2550 vsi_list_id, false, opcode,
2551 new_fltr->lkup_type);
2552 /* update VSI list mapping info with new VSI ID */
2554 ice_set_bit(vsi_handle,
2555 m_entry->vsi_list_info->vsi_map);
2558 m_entry->vsi_count++;
2563 * ice_find_rule_entry - Search a rule entry
2564 * @hw: pointer to the hardware structure
2565 * @recp_id: lookup type for which the specified rule needs to be searched
2566 * @f_info: rule information
2568 * Helper function to search for a given rule entry
2569 * Returns pointer to entry storing the rule if found
2571 static struct ice_fltr_mgmt_list_entry *
2572 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2574 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2575 struct ice_switch_info *sw = hw->switch_info;
2576 struct LIST_HEAD_TYPE *list_head;
2578 list_head = &sw->recp_list[recp_id].filt_rules;
/* A rule matches when both its lookup data (MAC/VLAN/ethertype
 * union) and its Rx/Tx flag are equal; the action is not compared.
 * NOTE(review): caller is expected to hold the recipe's
 * filt_rule_lock -- confirm against call sites.
 */
2579 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2581 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2582 sizeof(f_info->l_data)) &&
2583 f_info->flag == list_itr->fltr_info.flag) {
2592 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2593 * @hw: pointer to the hardware structure
2594 * @recp_id: lookup type for which VSI lists needs to be searched
2595 * @vsi_handle: VSI handle to be found in VSI list
2596 * @vsi_list_id: VSI list ID found containing vsi_handle
2598 * Helper function to search a VSI list with single entry containing given VSI
2599 * handle element. This can be extended further to search VSI list with more
2600 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2602 static struct ice_vsi_list_map_info *
2603 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2606 struct ice_vsi_list_map_info *map_info = NULL;
2607 struct ice_switch_info *sw = hw->switch_info;
2608 struct LIST_HEAD_TYPE *list_head;
2610 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different list entry type, so the
 * walk is duplicated for each entry layout.
 */
2611 if (sw->recp_list[recp_id].adv_rule) {
2612 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2614 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2615 ice_adv_fltr_mgmt_list_entry,
2617 if (list_itr->vsi_list_info) {
2618 map_info = list_itr->vsi_list_info;
2619 if (ice_is_bit_set(map_info->vsi_map,
/* Found a list containing the VSI; report its ID to the caller. */
2621 *vsi_list_id = map_info->vsi_list_id;
2627 struct ice_fltr_mgmt_list_entry *list_itr;
2629 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2630 ice_fltr_mgmt_list_entry,
/* Legacy rules: only single-VSI lists are candidates for reuse,
 * per the function's documented contract above.
 */
2632 if (list_itr->vsi_count == 1 &&
2633 list_itr->vsi_list_info) {
2634 map_info = list_itr->vsi_list_info;
2635 if (ice_is_bit_set(map_info->vsi_map,
2637 *vsi_list_id = map_info->vsi_list_id;
2647 * ice_add_rule_internal - add rule for a given lookup type
2648 * @hw: pointer to the hardware structure
2649 * @recp_id: lookup type (recipe ID) for which rule has to be added
2650 * @f_entry: structure containing MAC forwarding information
2652 * Adds or updates the rule lists for a given recipe
2654 static enum ice_status
2655 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2656 struct ice_fltr_list_entry *f_entry)
2658 struct ice_switch_info *sw = hw->switch_info;
2659 struct ice_fltr_info *new_fltr, *cur_fltr;
2660 struct ice_fltr_mgmt_list_entry *m_entry;
2661 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2662 enum ice_status status = ICE_SUCCESS;
2664 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2665 return ICE_ERR_PARAM;
2667 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2668 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2669 f_entry->fltr_info.fwd_id.hw_vsi_id =
2670 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2672 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2674 ice_acquire_lock(rule_lock);
2675 new_fltr = &f_entry->fltr_info;
/* Rule source: the port for Rx filters, the HW VSI number for Tx. */
2676 if (new_fltr->flag & ICE_FLTR_RX)
2677 new_fltr->src = hw->port_info->lport;
2678 else if (new_fltr->flag & ICE_FLTR_TX)
2680 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule for this lookup data: program a brand new
 * forwarding rule. Otherwise fold the VSI into the existing
 * rule's VSI list.
 */
2682 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2684 status = ice_create_pkt_fwd_rule(hw, f_entry);
2685 goto exit_add_rule_internal;
2688 cur_fltr = &m_entry->fltr_info;
2689 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2691 exit_add_rule_internal:
2692 ice_release_lock(rule_lock);
2697 * ice_remove_vsi_list_rule
2698 * @hw: pointer to the hardware structure
2699 * @vsi_list_id: VSI list ID generated as part of allocate resource
2700 * @lkup_type: switch rule filter lookup type
2702 * The VSI list should be emptied before this function is called to remove the
2705 static enum ice_status
2706 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2707 enum ice_sw_lkup_type lkup_type)
2709 struct ice_aqc_sw_rules_elem *s_rule;
2710 enum ice_status status;
2713 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2714 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2716 return ICE_ERR_NO_MEMORY;
/* NOTE(review): s_rule is populated here but the visible code never
 * sends it via ice_aq_sw_rules() -- only the resource free below is
 * issued. This looks like dead allocation; confirm against the full
 * file before removing.
 */
2718 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2719 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2721 /* Free the vsi_list resource that we allocated. It is assumed that the
2722 * list is empty at this point.
2724 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2725 ice_aqc_opc_free_res);
2727 ice_free(hw, s_rule);
2732 * ice_rem_update_vsi_list
2733 * @hw: pointer to the hardware structure
2734 * @vsi_handle: VSI handle of the VSI to remove
2735 * @fm_list: filter management entry for which the VSI list management needs to
2738 static enum ice_status
2739 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2740 struct ice_fltr_mgmt_list_entry *fm_list)
2742 enum ice_sw_lkup_type lkup_type;
2743 enum ice_status status = ICE_SUCCESS;
/* Only rules currently forwarding to a non-empty VSI list can have a
 * VSI removed from them.
 */
2746 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2747 fm_list->vsi_count == 0)
2748 return ICE_ERR_PARAM;
2750 /* A rule with the VSI being removed does not exist */
2751 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2752 return ICE_ERR_DOES_NOT_EXIST;
2754 lkup_type = fm_list->fltr_info.lkup_type;
2755 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove == true: clear this VSI from the list in HW. */
2756 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2757 ice_aqc_opc_update_sw_rules,
2762 fm_list->vsi_count--;
2763 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Non-VLAN rules with a single remaining VSI are converted back to a
 * direct FWD_TO_VSI rule so the (now oversized) VSI list can be
 * released. VLAN rules always keep their prune list.
 */
2765 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2766 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2767 struct ice_vsi_list_map_info *vsi_list_info =
2768 fm_list->vsi_list_info;
/* The surviving VSI is the only bit left set in the map. */
2771 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2773 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2774 return ICE_ERR_OUT_OF_RANGE;
2776 /* Make sure VSI list is empty before removing it below */
2777 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2779 ice_aqc_opc_update_sw_rules,
2784 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2785 tmp_fltr_info.fwd_id.hw_vsi_id =
2786 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2787 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2788 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2790 ice_debug(hw, ICE_DBG_SW,
2791 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2792 tmp_fltr_info.fwd_id.hw_vsi_id, status);
/* Commit the converted filter info into the bookkeeping entry. */
2796 fm_list->fltr_info = tmp_fltr_info;
2799 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2800 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2801 struct ice_vsi_list_map_info *vsi_list_info =
2802 fm_list->vsi_list_info;
2804 /* Remove the VSI list since it is no longer used */
2805 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2807 ice_debug(hw, ICE_DBG_SW,
2808 "Failed to remove VSI list %d, error %d\n",
2809 vsi_list_id, status);
2813 LIST_DEL(&vsi_list_info->list_entry);
2814 ice_free(hw, vsi_list_info);
2815 fm_list->vsi_list_info = NULL;
2822 * ice_remove_rule_internal - Remove a filter rule of a given type
2824 * @hw: pointer to the hardware structure
2825 * @recp_id: recipe ID for which the rule needs to removed
2826 * @f_entry: rule entry containing filter information
2828 static enum ice_status
2829 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2830 struct ice_fltr_list_entry *f_entry)
2832 struct ice_switch_info *sw = hw->switch_info;
2833 struct ice_fltr_mgmt_list_entry *list_elem;
2834 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2835 enum ice_status status = ICE_SUCCESS;
2836 bool remove_rule = false;
2839 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2840 return ICE_ERR_PARAM;
2841 f_entry->fltr_info.fwd_id.hw_vsi_id =
2842 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2844 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2845 ice_acquire_lock(rule_lock);
2846 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2848 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be deleted, or only the
 * VSI list it forwards to needs shrinking.
 */
2852 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2854 } else if (!list_elem->vsi_list_info) {
2855 status = ICE_ERR_DOES_NOT_EXIST;
2857 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2858 /* a ref_cnt > 1 indicates that the vsi_list is being
2859 * shared by multiple rules. Decrement the ref_cnt and
2860 * remove this rule, but do not modify the list, as it
2861 * is in-use by other rules.
2863 list_elem->vsi_list_info->ref_cnt--;
2866 /* a ref_cnt of 1 indicates the vsi_list is only used
2867 * by one rule. However, the original removal request is only
2868 * for a single VSI. Update the vsi_list first, and only
2869 * remove the rule if there are no further VSIs in this list.
2871 vsi_handle = f_entry->fltr_info.vsi_handle;
2872 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2875 /* if VSI count goes to zero after updating the VSI list */
2876 if (list_elem->vsi_count == 0)
2881 /* Remove the lookup rule */
2882 struct ice_aqc_sw_rules_elem *s_rule;
/* No dummy packet header is needed for a removal, hence the
 * smaller NO_HDR buffer size.
 */
2884 s_rule = (struct ice_aqc_sw_rules_elem *)
2885 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2887 status = ICE_ERR_NO_MEMORY;
2891 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2892 ice_aqc_opc_remove_sw_rules);
2894 status = ice_aq_sw_rules(hw, s_rule,
2895 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2896 ice_aqc_opc_remove_sw_rules, NULL);
2898 /* Remove the bookkeeping entry from the list */
2899 ice_free(hw, s_rule);
2904 LIST_DEL(&list_elem->list_entry);
2905 ice_free(hw, list_elem);
2908 ice_release_lock(rule_lock);
2913 * ice_aq_get_res_alloc - get allocated resources
2914 * @hw: pointer to the HW struct
2915 * @num_entries: pointer to u16 to store the number of resource entries returned
2916 * @buf: pointer to user-supplied buffer
2917 * @buf_size: size of buf
2918 * @cd: pointer to command details structure or NULL
2920 * The user-supplied buffer must be large enough to store the resource
2921 * information for all resource types. Each resource type is an
2922 * ice_aqc_get_res_resp_data_elem structure.
2925 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2926 u16 buf_size, struct ice_sq_cd *cd)
2928 struct ice_aqc_get_res_alloc *resp;
2929 enum ice_status status;
2930 struct ice_aq_desc desc;
2933 return ICE_ERR_BAD_PTR;
/* Caller's buffer must hold one response element per resource type. */
2935 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2936 return ICE_ERR_INVAL_SIZE;
2938 resp = &desc.params.get_res;
2940 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2941 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled in on AQ success. */
2943 if (!status && num_entries)
2944 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2950 * ice_aq_get_res_descs - get allocated resource descriptors
2951 * @hw: pointer to the hardware structure
2952 * @num_entries: number of resource entries in buffer
2953 * @buf: Indirect buffer to hold data parameters and response
2954 * @buf_size: size of buffer for indirect commands
2955 * @res_type: resource type
2956 * @res_shared: is resource shared
2957 * @desc_id: input - first desc ID to start; output - next desc ID
2958 * @cd: pointer to command details structure or NULL
2961 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2962 struct ice_aqc_get_allocd_res_desc_resp *buf,
2963 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2964 struct ice_sq_cd *cd)
2966 struct ice_aqc_get_allocd_res_desc *cmd;
2967 struct ice_aq_desc desc;
2968 enum ice_status status;
2970 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2972 cmd = &desc.params.get_res_desc;
2975 return ICE_ERR_PARAM;
/* Buffer must exactly match the requested number of descriptors. */
2977 if (buf_size != (num_entries * sizeof(*buf)))
2978 return ICE_ERR_PARAM;
2980 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared flag into the res field. */
2982 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2983 ICE_AQC_RES_TYPE_M) | (res_shared ?
2984 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2985 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2987 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is in/out: FW reports where the next query should start. */
2989 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2995 * ice_add_mac - Add a MAC address based filter rule
2996 * @hw: pointer to the hardware structure
2997 * @m_list: list of MAC addresses and forwarding information
2999 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3000 * multiple unicast addresses, the function assumes that all the
3001 * addresses are unique in a given add_mac call. It doesn't
3002 * check for duplicates in this case, removing duplicates from a given
3003 * list should be taken care of in the caller of this function.
3006 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3008 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3009 struct ice_fltr_list_entry *m_list_itr;
3010 struct LIST_HEAD_TYPE *rule_head;
3011 u16 elem_sent, total_elem_left;
3012 struct ice_switch_info *sw;
3013 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3014 enum ice_status status = ICE_SUCCESS;
3015 u16 num_unicast = 0;
3019 return ICE_ERR_PARAM;
3021 sw = hw->switch_info;
3022 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; unicast addresses (when not shared)
 * are counted for a bulk add below, multicast/shared-unicast entries
 * are added one at a time through the common rule path.
 */
3023 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3025 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3029 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3030 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3031 if (!ice_is_vsi_valid(hw, vsi_handle))
3032 return ICE_ERR_PARAM;
3033 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3034 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3035 /* update the src in case it is VSI num */
3036 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3037 return ICE_ERR_PARAM;
3038 m_list_itr->fltr_info.src = hw_vsi_id;
3039 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3040 IS_ZERO_ETHER_ADDR(add))
3041 return ICE_ERR_PARAM;
3042 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3043 /* Don't overwrite the unicast address */
3044 ice_acquire_lock(rule_lock);
3045 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3046 &m_list_itr->fltr_info)) {
3047 ice_release_lock(rule_lock);
3048 return ICE_ERR_ALREADY_EXISTS;
3050 ice_release_lock(rule_lock);
3052 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3053 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3054 m_list_itr->status =
3055 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3057 if (m_list_itr->status)
3058 return m_list_itr->status;
3062 ice_acquire_lock(rule_lock);
3063 /* Exit if no suitable entries were found for adding bulk switch rule */
3065 status = ICE_SUCCESS;
3066 goto ice_add_mac_exit;
3069 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3071 /* Allocate switch rule buffer for the bulk update for unicast */
3072 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3073 s_rule = (struct ice_aqc_sw_rules_elem *)
3074 ice_calloc(hw, num_unicast, s_rule_size);
3076 status = ICE_ERR_NO_MEMORY;
3077 goto ice_add_mac_exit;
/* Pass 2: fill one rule element per unicast address into the
 * contiguous bulk buffer.
 */
3081 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3083 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3084 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3086 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3087 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3088 ice_aqc_opc_add_sw_rules);
3089 r_iter = (struct ice_aqc_sw_rules_elem *)
3090 ((u8 *)r_iter + s_rule_size);
3094 /* Call AQ bulk switch rule update for all unicast addresses */
3096 /* Call AQ switch rule in AQ_MAX chunk */
3097 for (total_elem_left = num_unicast; total_elem_left > 0;
3098 total_elem_left -= elem_sent) {
3099 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Each AQ message is capped at ICE_AQ_MAX_BUF_LEN bytes. */
3101 elem_sent = min(total_elem_left,
3102 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3103 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3104 elem_sent, ice_aqc_opc_add_sw_rules,
3107 goto ice_add_mac_exit;
3108 r_iter = (struct ice_aqc_sw_rules_elem *)
3109 ((u8 *)r_iter + (elem_sent * s_rule_size));
3112 /* Fill up rule ID based on the value returned from FW */
3114 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3116 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3117 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3118 struct ice_fltr_mgmt_list_entry *fm_entry;
3120 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3121 f_info->fltr_rule_id =
3122 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3123 f_info->fltr_act = ICE_FWD_TO_VSI;
3124 /* Create an entry to track this MAC address */
3125 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3126 ice_malloc(hw, sizeof(*fm_entry));
3128 status = ICE_ERR_NO_MEMORY;
3129 goto ice_add_mac_exit;
3131 fm_entry->fltr_info = *f_info;
3132 fm_entry->vsi_count = 1;
3133 /* The book keeping entries will get removed when
3134 * base driver calls remove filter AQ command
3137 LIST_ADD(&fm_entry->list_entry, rule_head);
3138 r_iter = (struct ice_aqc_sw_rules_elem *)
3139 ((u8 *)r_iter + s_rule_size);
3144 ice_release_lock(rule_lock);
3146 ice_free(hw, s_rule);
3151 * ice_add_vlan_internal - Add one VLAN based filter rule
3152 * @hw: pointer to the hardware structure
3153 * @f_entry: filter entry containing one VLAN information
3155 static enum ice_status
3156 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3158 struct ice_switch_info *sw = hw->switch_info;
3159 struct ice_fltr_mgmt_list_entry *v_list_itr;
3160 struct ice_fltr_info *new_fltr, *cur_fltr;
3161 enum ice_sw_lkup_type lkup_type;
3162 u16 vsi_list_id = 0, vsi_handle;
3163 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3164 enum ice_status status = ICE_SUCCESS;
3166 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3167 return ICE_ERR_PARAM;
3169 f_entry->fltr_info.fwd_id.hw_vsi_id =
3170 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3171 new_fltr = &f_entry->fltr_info;
3173 /* VLAN ID should only be 12 bits */
3174 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3175 return ICE_ERR_PARAM;
3177 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3178 return ICE_ERR_PARAM;
3180 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3181 lkup_type = new_fltr->lkup_type;
3182 vsi_handle = new_fltr->vsi_handle;
3183 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3184 ice_acquire_lock(rule_lock);
3185 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* Case 1: no rule for this VLAN yet -- create one, reusing an
 * existing single-VSI list for this VSI when possible.
 */
3187 struct ice_vsi_list_map_info *map_info = NULL;
3189 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3190 /* All VLAN pruning rules use a VSI list. Check if
3191 * there is already a VSI list containing VSI that we
3192 * want to add. If found, use the same vsi_list_id for
3193 * this new VLAN rule or else create a new list.
3195 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3199 status = ice_create_vsi_list_rule(hw,
3207 /* Convert the action to forwarding to a VSI list. */
3208 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3209 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3212 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-lookup to get the bookkeeping entry just created. */
3214 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3217 status = ICE_ERR_DOES_NOT_EXIST;
3220 /* reuse VSI list for new rule and increment ref_cnt */
3222 v_list_itr->vsi_list_info = map_info;
3223 map_info->ref_cnt++;
3225 v_list_itr->vsi_list_info =
3226 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is not shared -- just add
 * the new VSI to the existing list.
 */
3230 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3231 /* Update existing VSI list to add new VSI ID only if it used
3234 cur_fltr = &v_list_itr->fltr_info;
3235 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3238 /* If VLAN rule exists and VSI list being used by this rule is
3239 * referenced by more than 1 VLAN rule. Then create a new VSI
3240 * list appending previous VSI with new VSI and update existing
3241 * VLAN rule to point to new VSI list ID
3243 struct ice_fltr_info tmp_fltr;
3244 u16 vsi_handle_arr[2];
3247 /* Current implementation only supports reusing VSI list with
3248 * one VSI count. We should never hit below condition
3250 if (v_list_itr->vsi_count > 1 &&
3251 v_list_itr->vsi_list_info->ref_cnt > 1) {
3252 ice_debug(hw, ICE_DBG_SW,
3253 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3254 status = ICE_ERR_CFG;
/* The shared list holds exactly one VSI; find it so the new
 * two-entry list can contain both.
 */
3259 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3262 /* A rule already exists with the new VSI being added */
3263 if (cur_handle == vsi_handle) {
3264 status = ICE_ERR_ALREADY_EXISTS;
3268 vsi_handle_arr[0] = cur_handle;
3269 vsi_handle_arr[1] = vsi_handle;
3270 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3271 &vsi_list_id, lkup_type);
3275 tmp_fltr = v_list_itr->fltr_info;
3276 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3277 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3278 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3279 /* Update the previous switch rule to a new VSI list which
3280 * includes current VSI that is requested
3282 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3286 /* before overriding VSI list map info. decrement ref_cnt of
3289 v_list_itr->vsi_list_info->ref_cnt--;
3291 /* now update to newly created list */
3292 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3293 v_list_itr->vsi_list_info =
3294 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3296 v_list_itr->vsi_count++;
3300 ice_release_lock(rule_lock);
3305 * ice_add_vlan - Add VLAN based filter rule
3306 * @hw: pointer to the hardware structure
3307 * @v_list: list of VLAN entries and forwarding information
3310 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3312 struct ice_fltr_list_entry *v_list_itr;
3315 return ICE_ERR_PARAM;
/* Per-entry status is recorded in each list node; processing stops
 * at the first failure and that status is returned.
 */
3317 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3319 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3320 return ICE_ERR_PARAM;
3321 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3322 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3323 if (v_list_itr->status)
3324 return v_list_itr->status;
3330 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3331 * @hw: pointer to the hardware structure
3332 * @mv_list: list of MAC and VLAN filters
3334 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3335 * pruning bits enabled, then it is the responsibility of the caller to make
3336 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3337 * VLAN won't be received on that VSI otherwise.
3340 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3342 struct ice_fltr_list_entry *mv_list_itr;
3344 if (!mv_list || !hw)
3345 return ICE_ERR_PARAM;
/* Each entry must be a MAC+VLAN lookup; rules are always added as
 * Tx filters, and the first failure aborts the walk.
 */
3347 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3349 enum ice_sw_lkup_type l_type =
3350 mv_list_itr->fltr_info.lkup_type;
3352 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3353 return ICE_ERR_PARAM;
3354 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3355 mv_list_itr->status =
3356 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3358 if (mv_list_itr->status)
3359 return mv_list_itr->status;
3365 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3366 * @hw: pointer to the hardware structure
3367 * @em_list: list of ether type MAC filter, MAC is optional
3369 * This function requires the caller to populate the entries in
3370 * the filter list with the necessary fields (including flags to
3371 * indicate Tx or Rx rules).
/* Add ethertype (optionally ethertype+MAC) filter rules from em_list.
 * Unlike the VLAN/MAC-VLAN adders above, direction flags are supplied by the
 * caller in each entry; this function does not overwrite fltr_info.flag.
 */
3374 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3376 struct ice_fltr_list_entry *em_list_itr;
3378 if (!em_list || !hw)
3379 return ICE_ERR_PARAM;
3381 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3383 enum ice_sw_lkup_type l_type =
3384 em_list_itr->fltr_info.lkup_type;
/* accept either ethertype-only or ethertype+MAC lookups */
3386 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3387 l_type != ICE_SW_LKUP_ETHERTYPE)
3388 return ICE_ERR_PARAM;
3390 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3392 if (em_list_itr->status)
3393 return em_list_itr->status;
3399 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3400 * @hw: pointer to the hardware structure
3401 * @em_list: list of ethertype or ethertype MAC entries
/* Remove ethertype / ethertype+MAC rules listed in em_list.
 * Uses the _SAFE iterator since ice_remove_rule_internal may unlink entries
 * from the book-keeping lists while we walk.
 */
3404 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3406 struct ice_fltr_list_entry *em_list_itr, *tmp;
3408 if (!em_list || !hw)
3409 return ICE_ERR_PARAM;
3411 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3413 enum ice_sw_lkup_type l_type =
3414 em_list_itr->fltr_info.lkup_type;
3416 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3417 l_type != ICE_SW_LKUP_ETHERTYPE)
3418 return ICE_ERR_PARAM;
3420 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3422 if (em_list_itr->status)
3423 return em_list_itr->status;
3429 * ice_rem_sw_rule_info
3430 * @hw: pointer to the hardware structure
3431 * @rule_head: pointer to the switch list structure that we want to delete
/* Free every book-keeping entry on a (non-advanced) switch-rule list.
 * Only releases driver memory; does not touch hardware rules.
 */
3434 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3436 if (!LIST_EMPTY(rule_head)) {
3437 struct ice_fltr_mgmt_list_entry *entry;
3438 struct ice_fltr_mgmt_list_entry *tmp;
/* _SAFE variant: entries are deleted while iterating */
3440 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3441 ice_fltr_mgmt_list_entry, list_entry) {
3442 LIST_DEL(&entry->list_entry);
3443 ice_free(hw, entry);
3449 * ice_rem_adv_rule_info
3450 * @hw: pointer to the hardware structure
3451 * @rule_head: pointer to the switch list structure that we want to delete
/* Free every advanced-rule book-keeping entry on rule_head.
 * Advanced entries own a separately allocated lkups array, which must be
 * freed before the entry itself.
 */
3454 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3456 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3457 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
/* nothing to do for an empty list */
3459 if (LIST_EMPTY(rule_head))
3462 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3463 ice_adv_fltr_mgmt_list_entry, list_entry) {
3464 LIST_DEL(&lst_itr->list_entry);
/* free the lookup array first, then the entry that owns it */
3465 ice_free(hw, lst_itr->lkups);
3466 ice_free(hw, lst_itr);
3471 * ice_rem_all_sw_rules_info
3472 * @hw: pointer to the hardware structure
/* Purge the driver's rule book-keeping for every recipe, dispatching to the
 * plain or advanced cleanup helper based on the recipe's adv_rule flag.
 */
3474 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3476 struct ice_switch_info *sw = hw->switch_info;
3479 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3480 struct LIST_HEAD_TYPE *rule_head;
3482 rule_head = &sw->recp_list[i].filt_rules;
/* plain recipes use ice_rem_sw_rule_info; advanced ones need the
 * variant that also frees the per-entry lkups array
 */
3483 if (!sw->recp_list[i].adv_rule)
3484 ice_rem_sw_rule_info(hw, rule_head);
3486 ice_rem_adv_rule_info(hw, rule_head);
3491 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3492 * @pi: pointer to the port_info structure
3493 * @vsi_handle: VSI handle to set as default
3494 * @set: true to add the above mentioned switch rule, false to remove it
3495 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3497 * add filter rule to set/unset given VSI as default VSI for the switch
3498 * (represented by swid)
/* Add (set=true) or remove (set=false) the switch rule that makes vsi_handle
 * the default VSI for the given direction. On success the port_info default
 * VSI number / rule ID fields are updated to record (or invalidate) the rule.
 */
3501 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3504 struct ice_aqc_sw_rules_elem *s_rule;
3505 struct ice_fltr_info f_info;
3506 struct ice_hw *hw = pi->hw;
3507 enum ice_adminq_opc opcode;
3508 enum ice_status status;
3512 if (!ice_is_vsi_valid(hw, vsi_handle))
3513 return ICE_ERR_PARAM;
3514 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* add needs room for the dummy ethernet header; remove only needs the
 * rule index, so a smaller buffer suffices
 */
3516 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3517 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3518 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3520 return ICE_ERR_NO_MEMORY;
3522 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3524 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3525 f_info.flag = direction;
3526 f_info.fltr_act = ICE_FWD_TO_VSI;
3527 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* source differs by direction: Rx rules match on the physical port,
 * Tx rules match on the VSI itself
 */
3529 if (f_info.flag & ICE_FLTR_RX) {
3530 f_info.src = pi->lport;
3531 f_info.src_id = ICE_SRC_ID_LPORT;
3533 f_info.fltr_rule_id =
3534 pi->dflt_rx_vsi_rule_id;
3535 } else if (f_info.flag & ICE_FLTR_TX) {
3536 f_info.src_id = ICE_SRC_ID_VSI;
3537 f_info.src = hw_vsi_id;
3539 f_info.fltr_rule_id =
3540 pi->dflt_tx_vsi_rule_id;
3544 opcode = ice_aqc_opc_add_sw_rules;
3546 opcode = ice_aqc_opc_remove_sw_rules;
3548 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3550 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3551 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* record the hardware-assigned rule index for later removal */
3554 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3556 if (f_info.flag & ICE_FLTR_TX) {
3557 pi->dflt_tx_vsi_num = hw_vsi_id;
3558 pi->dflt_tx_vsi_rule_id = index;
3559 } else if (f_info.flag & ICE_FLTR_RX) {
3560 pi->dflt_rx_vsi_num = hw_vsi_id;
3561 pi->dflt_rx_vsi_rule_id = index;
/* removal path: invalidate the cached default VSI tracking */
3564 if (f_info.flag & ICE_FLTR_TX) {
3565 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3566 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3567 } else if (f_info.flag & ICE_FLTR_RX) {
3568 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3569 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3574 ice_free(hw, s_rule);
3579 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3580 * @hw: pointer to the hardware structure
3581 * @recp_id: lookup type for which the specified rule needs to be searched
3582 * @f_info: rule information
3584 * Helper function to search for a unicast rule entry - this is to be used
3585 * to remove unicast MAC filter that is not shared with other VSIs on the
3588 * Returns pointer to entry storing the rule if found
/* Search recp_id's rule list for an entry whose lookup data, hardware VSI ID,
 * and direction flag all match f_info exactly; used to verify a unicast MAC
 * rule belongs to this VSI before removal.
 * NOTE(review): caller must hold the recipe's filt_rule_lock — this helper
 * does not take it (see the locked call site in ice_remove_mac below).
 */
3590 static struct ice_fltr_mgmt_list_entry *
3591 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3592 struct ice_fltr_info *f_info)
3594 struct ice_switch_info *sw = hw->switch_info;
3595 struct ice_fltr_mgmt_list_entry *list_itr;
3596 struct LIST_HEAD_TYPE *list_head;
3598 list_head = &sw->recp_list[recp_id].filt_rules;
3599 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* match on lookup data, destination hw VSI, and direction flag */
3601 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3602 sizeof(f_info->l_data)) &&
3603 f_info->fwd_id.hw_vsi_id ==
3604 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3605 f_info->flag == list_itr->fltr_info.flag)
3612 * ice_remove_mac - remove a MAC address based filter rule
3613 * @hw: pointer to the hardware structure
3614 * @m_list: list of MAC addresses and forwarding information
3616 * This function removes either a MAC filter rule or a specific VSI from a
3617 * VSI list for a multicast MAC address.
3619 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3620 * ice_add_mac. Caller should be aware that this call will only work if all
3621 * the entries passed into m_list were added previously. It will not attempt to
3622 * do a partial remove of entries that were found.
/* Remove each MAC filter in m_list (or drop this VSI from a shared VSI list).
 * Non-shared unicast addresses are first verified to belong to this VSI so we
 * never delete a rule owned by another VSI. Stops on the first failure.
 */
3625 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3627 struct ice_fltr_list_entry *list_itr, *tmp;
3628 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3631 return ICE_ERR_PARAM;
3633 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3634 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3636 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3637 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3640 if (l_type != ICE_SW_LKUP_MAC)
3641 return ICE_ERR_PARAM;
3643 vsi_handle = list_itr->fltr_info.vsi_handle;
3644 if (!ice_is_vsi_valid(hw, vsi_handle))
3645 return ICE_ERR_PARAM;
/* resolve software handle to the hardware VSI number used in rules */
3647 list_itr->fltr_info.fwd_id.hw_vsi_id =
3648 ice_get_hw_vsi_num(hw, vsi_handle);
3649 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3650 /* Don't remove the unicast address that belongs to
3651 * another VSI on the switch, since it is not being
/* lookup under the recipe lock; bail if the rule isn't ours */
3654 ice_acquire_lock(rule_lock);
3655 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3656 &list_itr->fltr_info)) {
3657 ice_release_lock(rule_lock);
3658 return ICE_ERR_DOES_NOT_EXIST;
3660 ice_release_lock(rule_lock);
3662 list_itr->status = ice_remove_rule_internal(hw,
3665 if (list_itr->status)
3666 return list_itr->status;
3672 * ice_remove_vlan - Remove VLAN based filter rule
3673 * @hw: pointer to the hardware structure
3674 * @v_list: list of VLAN entries and forwarding information
/* Remove each VLAN filter rule in v_list; returns the first per-entry error
 * encountered (remaining entries are left untouched).
 */
3677 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3679 struct ice_fltr_list_entry *v_list_itr, *tmp;
3682 return ICE_ERR_PARAM;
3684 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3686 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
/* reject entries that are not VLAN lookups */
3688 if (l_type != ICE_SW_LKUP_VLAN)
3689 return ICE_ERR_PARAM;
3690 v_list_itr->status = ice_remove_rule_internal(hw,
3693 if (v_list_itr->status)
3694 return v_list_itr->status;
3700 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3701 * @hw: pointer to the hardware structure
3702 * @v_list: list of MAC VLAN entries and forwarding information
/* Remove each MAC+VLAN pair filter rule in v_list; stops on first failure.
 * Mirrors ice_remove_vlan but for the MAC_VLAN lookup type.
 */
3705 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3707 struct ice_fltr_list_entry *v_list_itr, *tmp;
3710 return ICE_ERR_PARAM;
3712 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3714 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3716 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3717 return ICE_ERR_PARAM;
3718 v_list_itr->status =
3719 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3721 if (v_list_itr->status)
3722 return v_list_itr->status;
3728 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3729 * @fm_entry: filter entry to inspect
3730 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter applies to vsi_handle: either it forwards directly to
 * that VSI, or it forwards to a VSI list whose bitmap includes the VSI.
 */
3733 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3735 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3736 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
/* VSI-list case: check membership bit in the list's vsi_map */
3737 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3738 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3743 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3744 * @hw: pointer to the hardware structure
3745 * @vsi_handle: VSI handle to remove filters from
3746 * @vsi_list_head: pointer to the list to add entry to
3747 * @fi: pointer to fltr_info of filter entry to copy & add
3749 * Helper function, used when creating a list of filters to remove from
3750 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3751 * original filter entry, with the exception of fltr_info.fltr_act and
3752 * fltr_info.fwd_id fields. These are set such that later logic can
3753 * extract which VSI to remove the fltr from, and pass on that information.
/* Allocate a COPY of *fi, retarget it at vsi_handle (act forced to
 * ICE_FWD_TO_VSI, fwd_id resolved to the hw VSI number), and append it to
 * vsi_list_head. Caller owns and must free the copy.
 */
3755 static enum ice_status
3756 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3757 struct LIST_HEAD_TYPE *vsi_list_head,
3758 struct ice_fltr_info *fi)
3760 struct ice_fltr_list_entry *tmp;
3762 /* this memory is freed up in the caller function
3763 * once filters for this VSI are removed
3765 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3767 return ICE_ERR_NO_MEMORY;
3769 tmp->fltr_info = *fi;
3771 /* Overwrite these fields to indicate which VSI to remove filter from,
3772 * so find and remove logic can extract the information from the
3773 * list entries. Note that original entries will still have proper
3776 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3777 tmp->fltr_info.vsi_handle = vsi_handle;
3778 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3780 LIST_ADD(&tmp->list_entry, vsi_list_head);
3786 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3787 * @hw: pointer to the hardware structure
3788 * @vsi_handle: VSI handle to remove filters from
3789 * @lkup_list_head: pointer to the list that has certain lookup type filters
3790 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3792 * Locates all filters in lkup_list_head that are used by the given VSI,
3793 * and adds COPIES of those entries to vsi_list_head (intended to be used
3794 * to remove the listed filters).
3795 * Note that this means all entries in vsi_list_head must be explicitly
3796 * deallocated by the caller when done with list.
/* Scan lkup_list_head for filters used by vsi_handle and append COPIES of
 * them to vsi_list_head (via ice_add_entry_to_vsi_fltr_list). The caller
 * must free every entry added to vsi_list_head.
 */
3798 static enum ice_status
3799 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3800 struct LIST_HEAD_TYPE *lkup_list_head,
3801 struct LIST_HEAD_TYPE *vsi_list_head)
3803 struct ice_fltr_mgmt_list_entry *fm_entry;
3804 enum ice_status status = ICE_SUCCESS;
3806 /* check to make sure VSI ID is valid and within boundary */
3807 if (!ice_is_vsi_valid(hw, vsi_handle))
3808 return ICE_ERR_PARAM;
3810 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3811 ice_fltr_mgmt_list_entry, list_entry) {
3812 struct ice_fltr_info *fi;
3814 fi = &fm_entry->fltr_info;
/* skip filters that do not apply to this VSI */
3815 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3818 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3827 * ice_determine_promisc_mask
3828 * @fi: filter info to parse
3830 * Helper function to determine which ICE_PROMISC_ mask corresponds
3831 * to given filter info.
/* Map a filter's MAC class (broadcast/multicast/unicast/other) and direction
 * into the corresponding ICE_PROMISC_* bit; "other" MACs fall through to the
 * VLAN promiscuous bits.
 */
3833 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3835 u16 vid = fi->l_data.mac_vlan.vlan_id;
3836 u8 *macaddr = fi->l_data.mac.mac_addr;
3837 bool is_tx_fltr = false;
3838 u8 promisc_mask = 0;
/* direction selects the _TX or _RX variant of each bit below */
3840 if (fi->flag == ICE_FLTR_TX)
3843 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3844 promisc_mask |= is_tx_fltr ?
3845 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3846 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3847 promisc_mask |= is_tx_fltr ?
3848 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3849 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3850 promisc_mask |= is_tx_fltr ?
3851 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* not a recognized MAC class: treat as a VLAN promiscuous filter */
3853 promisc_mask |= is_tx_fltr ?
3854 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3856 return promisc_mask;
3860 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3861 * @hw: pointer to the hardware structure
3862 * @vsi_handle: VSI handle to retrieve info from
3863 * @promisc_mask: pointer to mask to be filled in
3864 * @vid: VLAN ID of promisc VLAN VSI
/* OR together the promiscuous-mode bits of every PROMISC rule that applies
 * to vsi_handle, writing the result into *promisc_mask. Walks the recipe's
 * rule list under its filt_rule_lock.
 */
3867 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3870 struct ice_switch_info *sw = hw->switch_info;
3871 struct ice_fltr_mgmt_list_entry *itr;
3872 struct LIST_HEAD_TYPE *rule_head;
3873 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3875 if (!ice_is_vsi_valid(hw, vsi_handle))
3876 return ICE_ERR_PARAM;
3880 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3881 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3883 ice_acquire_lock(rule_lock);
3884 LIST_FOR_EACH_ENTRY(itr, rule_head,
3885 ice_fltr_mgmt_list_entry, list_entry) {
3886 /* Continue if this filter doesn't apply to this VSI or the
3887 * VSI ID is not in the VSI map for this filter
3889 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3892 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3894 ice_release_lock(rule_lock);
3900 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3901 * @hw: pointer to the hardware structure
3902 * @vsi_handle: VSI handle to retrieve info from
3903 * @promisc_mask: pointer to mask to be filled in
3904 * @vid: VLAN ID of promisc VLAN VSI
/* Same as ice_get_vsi_promisc but over the PROMISC_VLAN recipe: accumulate
 * VLAN-promiscuous bits for vsi_handle into *promisc_mask under the recipe
 * lock.
 */
3907 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3910 struct ice_switch_info *sw = hw->switch_info;
3911 struct ice_fltr_mgmt_list_entry *itr;
3912 struct LIST_HEAD_TYPE *rule_head;
3913 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3915 if (!ice_is_vsi_valid(hw, vsi_handle))
3916 return ICE_ERR_PARAM;
3920 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3921 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3923 ice_acquire_lock(rule_lock);
3924 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3926 /* Continue if this filter doesn't apply to this VSI or the
3927 * VSI ID is not in the VSI map for this filter
3929 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3932 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3934 ice_release_lock(rule_lock);
3940 * ice_remove_promisc - Remove promisc based filter rules
3941 * @hw: pointer to the hardware structure
3942 * @recp_id: recipe ID for which the rule needs to be removed
3943 * @v_list: list of promisc entries
/* Remove every promiscuous rule on v_list under the given recipe ID,
 * stopping and returning the per-entry status on the first failure.
 */
3945 static enum ice_status
3946 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3947 struct LIST_HEAD_TYPE *v_list)
3949 struct ice_fltr_list_entry *v_list_itr, *tmp;
3951 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3953 v_list_itr->status =
3954 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3955 if (v_list_itr->status)
3956 return v_list_itr->status;
3962 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3963 * @hw: pointer to the hardware structure
3964 * @vsi_handle: VSI handle to clear mode
3965 * @promisc_mask: mask of promiscuous config bits to clear
3966 * @vid: VLAN ID to clear VLAN promiscuous
/* Clear the requested promiscuous bit(s) for vsi_handle: collect matching
 * rules (PROMISC or PROMISC_VLAN recipe, chosen by the mask) into a local
 * remove list under the recipe lock, remove them outside the lock, then free
 * the temporary copies.
 */
3969 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3972 struct ice_switch_info *sw = hw->switch_info;
3973 struct ice_fltr_list_entry *fm_entry, *tmp;
3974 struct LIST_HEAD_TYPE remove_list_head;
3975 struct ice_fltr_mgmt_list_entry *itr;
3976 struct LIST_HEAD_TYPE *rule_head;
3977 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3978 enum ice_status status = ICE_SUCCESS;
3981 if (!ice_is_vsi_valid(hw, vsi_handle))
3982 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise PROMISC */
3984 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3985 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3987 recipe_id = ICE_SW_LKUP_PROMISC;
3989 rule_head = &sw->recp_list[recipe_id].filt_rules;
3990 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3992 INIT_LIST_HEAD(&remove_list_head);
3994 ice_acquire_lock(rule_lock);
3995 LIST_FOR_EACH_ENTRY(itr, rule_head,
3996 ice_fltr_mgmt_list_entry, list_entry) {
3997 struct ice_fltr_info *fltr_info;
3998 u8 fltr_promisc_mask = 0;
4000 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4002 fltr_info = &itr->fltr_info;
/* for VLAN promisc, only clear rules matching the requested VID */
4004 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4005 vid != fltr_info->l_data.mac_vlan.vlan_id)
4008 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4010 /* Skip if filter is not completely specified by given mask */
4011 if (fltr_promisc_mask & ~promisc_mask)
4014 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4018 ice_release_lock(rule_lock);
4019 goto free_fltr_list;
4022 ice_release_lock(rule_lock);
/* hardware removal happens outside the rule lock */
4024 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4027 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4028 ice_fltr_list_entry, list_entry) {
4029 LIST_DEL(&fm_entry->list_entry);
4030 ice_free(hw, fm_entry);
4037 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4038 * @hw: pointer to the hardware structure
4039 * @vsi_handle: VSI handle to configure
4040 * @promisc_mask: mask of promiscuous config bits
4041 * @vid: VLAN ID to set VLAN promiscuous
/* Enable the requested promiscuous mode(s) on vsi_handle. One switch rule is
 * programmed per direction/packet-type combination present in promisc_mask;
 * the loop peels one bit off the mask per iteration until it is empty.
 */
4044 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4046 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4047 struct ice_fltr_list_entry f_list_entry;
4048 struct ice_fltr_info new_fltr;
4049 enum ice_status status = ICE_SUCCESS;
4055 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4057 if (!ice_is_vsi_valid(hw, vsi_handle))
4058 return ICE_ERR_PARAM;
4059 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4061 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc uses the PROMISC_VLAN recipe keyed on the given VID */
4063 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4064 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4065 new_fltr.l_data.mac_vlan.vlan_id = vid;
4066 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4068 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4069 recipe_id = ICE_SW_LKUP_PROMISC;
4072 /* Separate filters must be set for each direction/packet type
4073 * combination, so we will loop over the mask value, store the
4074 * individual type, and clear it out in the input mask as it
4077 while (promisc_mask) {
/* pick exactly one pending bit: packet type + direction */
4083 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4084 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4085 pkt_type = UCAST_FLTR;
4086 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4087 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4088 pkt_type = UCAST_FLTR;
4090 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4091 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4092 pkt_type = MCAST_FLTR;
4093 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4094 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4095 pkt_type = MCAST_FLTR;
4097 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4098 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4099 pkt_type = BCAST_FLTR;
4100 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4101 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4102 pkt_type = BCAST_FLTR;
4106 /* Check for VLAN promiscuous flag */
4107 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4108 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4109 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4110 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4114 /* Set filter DA based on packet type */
4115 mac_addr = new_fltr.l_data.mac.mac_addr;
4116 if (pkt_type == BCAST_FLTR) {
4117 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4118 } else if (pkt_type == MCAST_FLTR ||
4119 pkt_type == UCAST_FLTR) {
4120 /* Use the dummy ether header DA */
4121 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4122 ICE_NONDMA_TO_NONDMA);
4123 if (pkt_type == MCAST_FLTR)
4124 mac_addr[0] |= 0x1; /* Set multicast bit */
4127 /* Need to reset this to zero for all iterations */
/* Tx rules source from the VSI; Rx rules source from the port */
4130 new_fltr.flag |= ICE_FLTR_TX;
4131 new_fltr.src = hw_vsi_id;
4133 new_fltr.flag |= ICE_FLTR_RX;
4134 new_fltr.src = hw->port_info->lport;
4137 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4138 new_fltr.vsi_handle = vsi_handle;
4139 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4140 f_list_entry.fltr_info = new_fltr;
4142 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4143 if (status != ICE_SUCCESS)
4144 goto set_promisc_exit;
4152 * ice_set_vlan_vsi_promisc
4153 * @hw: pointer to the hardware structure
4154 * @vsi_handle: VSI handle to configure
4155 * @promisc_mask: mask of promiscuous config bits
4156 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4158 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Apply (or with rm_vlan_promisc, clear) the given promiscuous mode(s) for
 * every VLAN currently configured on vsi_handle: snapshot the VSI's VLAN
 * filters under the VLAN recipe lock, then set/clear promisc per VLAN ID,
 * and finally free the snapshot copies.
 */
4161 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4162 bool rm_vlan_promisc)
4164 struct ice_switch_info *sw = hw->switch_info;
4165 struct ice_fltr_list_entry *list_itr, *tmp;
4166 struct LIST_HEAD_TYPE vsi_list_head;
4167 struct LIST_HEAD_TYPE *vlan_head;
4168 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4169 enum ice_status status;
4172 INIT_LIST_HEAD(&vsi_list_head);
4173 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4174 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
/* copy this VSI's VLAN filters while holding the VLAN recipe lock */
4175 ice_acquire_lock(vlan_lock);
4176 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4178 ice_release_lock(vlan_lock);
4180 goto free_fltr_list;
4182 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4184 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4185 if (rm_vlan_promisc)
4186 status = ice_clear_vsi_promisc(hw, vsi_handle,
4187 promisc_mask, vlan_id);
4189 status = ice_set_vsi_promisc(hw, vsi_handle,
4190 promisc_mask, vlan_id);
/* free the snapshot entries regardless of success */
4196 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4197 ice_fltr_list_entry, list_entry) {
4198 LIST_DEL(&list_itr->list_entry);
4199 ice_free(hw, list_itr);
4205 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4206 * @hw: pointer to the hardware structure
4207 * @vsi_handle: VSI handle to remove filters from
4208 * @lkup: switch rule filter lookup type
/* Remove all filters of one lookup type from vsi_handle: snapshot matching
 * rules under the recipe lock, dispatch to the type-specific remover, then
 * free the snapshot copies. DFLT removal is unimplemented (debug log only).
 */
4211 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4212 enum ice_sw_lkup_type lkup)
4214 struct ice_switch_info *sw = hw->switch_info;
4215 struct ice_fltr_list_entry *fm_entry;
4216 struct LIST_HEAD_TYPE remove_list_head;
4217 struct LIST_HEAD_TYPE *rule_head;
4218 struct ice_fltr_list_entry *tmp;
4219 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4220 enum ice_status status;
4222 INIT_LIST_HEAD(&remove_list_head);
4223 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4224 rule_head = &sw->recp_list[lkup].filt_rules;
4225 ice_acquire_lock(rule_lock);
4226 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4228 ice_release_lock(rule_lock);
/* per-lookup-type removal dispatch */
4233 case ICE_SW_LKUP_MAC:
4234 ice_remove_mac(hw, &remove_list_head);
4236 case ICE_SW_LKUP_VLAN:
4237 ice_remove_vlan(hw, &remove_list_head);
4239 case ICE_SW_LKUP_PROMISC:
4240 case ICE_SW_LKUP_PROMISC_VLAN:
4241 ice_remove_promisc(hw, lkup, &remove_list_head);
4243 case ICE_SW_LKUP_MAC_VLAN:
4244 ice_remove_mac_vlan(hw, &remove_list_head);
4246 case ICE_SW_LKUP_ETHERTYPE:
4247 case ICE_SW_LKUP_ETHERTYPE_MAC:
4248 ice_remove_eth_mac(hw, &remove_list_head);
4250 case ICE_SW_LKUP_DFLT:
4251 ice_debug(hw, ICE_DBG_SW,
4252 "Remove filters for this lookup type hasn't been implemented yet\n");
4254 case ICE_SW_LKUP_LAST:
4255 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* release the snapshot copies made by ice_add_to_vsi_fltr_list */
4259 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4260 ice_fltr_list_entry, list_entry) {
4261 LIST_DEL(&fm_entry->list_entry);
4262 ice_free(hw, fm_entry);
4267 * ice_remove_vsi_fltr - Remove all filters for a VSI
4268 * @hw: pointer to the hardware structure
4269 * @vsi_handle: VSI handle to remove filters from
/* Remove every filter type for vsi_handle by invoking the per-lookup-type
 * remover once for each supported lookup (DFLT is a no-op downstream).
 */
4271 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4273 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4275 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4276 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4277 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4278 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4279 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4280 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4281 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4282 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4286 * ice_alloc_res_cntr - allocating resource counter
4287 * @hw: pointer to the hardware structure
4288 * @type: type of resource
4289 * @alloc_shared: if set it is shared else dedicated
4290 * @num_items: number of entries requested for FD resource type
4291 * @counter_id: counter index returned by AQ call
/* Allocate num_items counter resources of the given type via the alloc-res
 * admin queue command and return the first index through *counter_id.
 */
4294 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4297 struct ice_aqc_alloc_free_res_elem *buf;
4298 enum ice_status status;
4301 /* Allocate resource */
4302 buf_len = sizeof(*buf);
4303 buf = (struct ice_aqc_alloc_free_res_elem *)
4304 ice_malloc(hw, buf_len);
4306 return ICE_ERR_NO_MEMORY;
4308 buf->num_elems = CPU_TO_LE16(num_items);
/* res_type field carries both the type and the shared/dedicated flag */
4309 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4310 ICE_AQC_RES_TYPE_M) | alloc_shared);
4312 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4313 ice_aqc_opc_alloc_res, NULL);
/* firmware returns the allocated index in the first element */
4317 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4325 * ice_free_res_cntr - free resource counter
4326 * @hw: pointer to the hardware structure
4327 * @type: type of resource
4328 * @alloc_shared: if set it is shared else dedicated
4329 * @num_items: number of entries to be freed for FD resource type
4330 * @counter_id: counter ID resource which needs to be freed
/* Release a previously allocated counter resource via the free-res admin
 * queue command; failure is only logged at ICE_DBG_SW level.
 */
4333 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4336 struct ice_aqc_alloc_free_res_elem *buf;
4337 enum ice_status status;
4341 buf_len = sizeof(*buf);
4342 buf = (struct ice_aqc_alloc_free_res_elem *)
4343 ice_malloc(hw, buf_len);
4345 return ICE_ERR_NO_MEMORY;
4347 buf->num_elems = CPU_TO_LE16(num_items);
/* same type/shared encoding as ice_alloc_res_cntr */
4348 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4349 ICE_AQC_RES_TYPE_M) | alloc_shared);
4350 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4352 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4353 ice_aqc_opc_free_res, NULL);
4355 ice_debug(hw, ICE_DBG_SW,
4356 "counter resource could not be freed\n");
4363 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4364 * @hw: pointer to the hardware structure
4365 * @counter_id: returns counter index
/* Convenience wrapper: allocate one dedicated VLAN counter resource. */
4367 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4369 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4370 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4375 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4376 * @hw: pointer to the hardware structure
4377 * @counter_id: counter index to be freed
/* Convenience wrapper: free one dedicated VLAN counter resource. */
4379 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4381 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4382 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4387 * ice_alloc_res_lg_act - add large action resource
4388 * @hw: pointer to the hardware structure
4389 * @l_id: large action ID to fill it in
4390 * @num_acts: number of actions to hold with a large action entry
/* Allocate one large-action table entry sized for num_acts actions
 * (1..ICE_MAX_LG_ACT); the wide-table resource type is chosen from
 * num_acts, and the resulting entry ID is returned via *l_id.
 */
4392 static enum ice_status
4393 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4395 struct ice_aqc_alloc_free_res_elem *sw_buf;
4396 enum ice_status status;
4399 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4400 return ICE_ERR_PARAM;
4402 /* Allocate resource for large action */
4403 buf_len = sizeof(*sw_buf);
4404 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4405 ice_malloc(hw, buf_len);
4407 return ICE_ERR_NO_MEMORY;
4409 sw_buf->num_elems = CPU_TO_LE16(1);
4411 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4412 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4413 * If num_acts is greater than 2, then use
4414 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4415 * The num_acts cannot exceed 4. This was ensured at the
4416 * beginning of the function.
/* NOTE(review): the comment above says WIDE_TABLE_3 for num_acts==2 but
 * the code selects WIDE_TABLE_2 — confirm which is intended.
 */
4419 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4420 else if (num_acts == 2)
4421 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4423 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4425 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4426 ice_aqc_opc_alloc_res, NULL);
4428 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4430 ice_free(hw, sw_buf);
4435 * ice_add_mac_with_sw_marker - add filter with sw marker
4436 * @hw: pointer to the hardware structure
4437 * @f_info: filter info structure containing the MAC filter information
4438 * @sw_marker: sw marker to tag the Rx descriptor with
/* Attach a software marker (tagged into the Rx descriptor) to a MAC filter.
 * Adds the plain MAC rule first if absent so the large-action update always
 * operates on an existing rule; rejects rules that already carry a counter
 * or the same marker. On failure, a rule this call created is rolled back.
 */
4441 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4444 struct ice_switch_info *sw = hw->switch_info;
4445 struct ice_fltr_mgmt_list_entry *m_entry;
4446 struct ice_fltr_list_entry fl_info;
4447 struct LIST_HEAD_TYPE l_head;
4448 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4449 enum ice_status ret;
/* parameter validation: VSI-forwarding MAC rule with a usable marker */
4453 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4454 return ICE_ERR_PARAM;
4456 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4457 return ICE_ERR_PARAM;
4459 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4460 return ICE_ERR_PARAM;
4462 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4463 return ICE_ERR_PARAM;
4464 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4466 /* Add filter if it doesn't exist so then the adding of large
4467 * action always results in update
4470 INIT_LIST_HEAD(&l_head);
4471 fl_info.fltr_info = *f_info;
4472 LIST_ADD(&fl_info.list_entry, &l_head);
/* remember whether the rule pre-existed so cleanup can decide
 * whether to remove it on failure
 */
4474 entry_exists = false;
4475 ret = ice_add_mac(hw, &l_head);
4476 if (ret == ICE_ERR_ALREADY_EXISTS)
4477 entry_exists = true;
4481 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4482 ice_acquire_lock(rule_lock);
4483 /* Get the book keeping entry for the filter */
4484 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4488 /* If counter action was enabled for this rule then don't enable
4489 * sw marker large action
4491 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4492 ret = ICE_ERR_PARAM;
4496 /* if same marker was added before */
4497 if (m_entry->sw_marker_id == sw_marker) {
4498 ret = ICE_ERR_ALREADY_EXISTS;
4502 /* Allocate a hardware table entry to hold large act. Three actions
4503 * for marker based large action
4505 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4509 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4512 /* Update the switch rule to add the marker action */
4513 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4515 ice_release_lock(rule_lock);
4520 ice_release_lock(rule_lock);
4521 /* only remove entry if it did not exist previously */
4523 ret = ice_remove_mac(hw, &l_head);
4529 * ice_add_mac_with_counter - add filter with counter enabled
4530 * @hw: pointer to the hardware structure
4531 * @f_info: pointer to filter info structure containing the MAC filter
4535 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4537 struct ice_switch_info *sw = hw->switch_info;
4538 struct ice_fltr_mgmt_list_entry *m_entry;
4539 struct ice_fltr_list_entry fl_info;
4540 struct LIST_HEAD_TYPE l_head;
4541 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4542 enum ice_status ret;
/* Same validation pattern as ice_add_mac_with_sw_marker(): only
 * VSI-forwarding MAC lookups with a valid VSI handle are accepted.
 */
4547 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4548 return ICE_ERR_PARAM;
4550 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4551 return ICE_ERR_PARAM;
4553 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4554 return ICE_ERR_PARAM;
4555 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
/* Track pre-existence so the error path only removes a rule we added. */
4557 entry_exist = false;
4559 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4561 /* Add filter if it doesn't exist so then the adding of large
4562 * action always results in update
4564 INIT_LIST_HEAD(&l_head);
4566 fl_info.fltr_info = *f_info;
4567 LIST_ADD(&fl_info.list_entry, &l_head);
4569 ret = ice_add_mac(hw, &l_head);
4570 if (ret == ICE_ERR_ALREADY_EXISTS)
4575 ice_acquire_lock(rule_lock);
4576 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4578 ret = ICE_ERR_BAD_PTR;
4582 /* Don't enable counter for a filter for which sw marker was enabled */
4583 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4584 ret = ICE_ERR_PARAM;
4588 /* If a counter was already enabled then don't need to add again */
4589 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4590 ret = ICE_ERR_ALREADY_EXISTS;
4594 /* Allocate a hardware table entry to VLAN counter */
4595 ret = ice_alloc_vlan_res_counter(hw, &counter_id)
4599 /* Allocate a hardware table entry to hold large act. Two actions for
4600 * counter based large action
4602 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4606 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4609 /* Update the switch rule to add the counter action */
4610 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4612 ice_release_lock(rule_lock);
4617 ice_release_lock(rule_lock);
4618 /* only remove entry if it did not exist previously */
4620 ret = ice_remove_mac(hw, &l_head);
4625 /* This is mapping table entry that maps every word within a given protocol
4626 * structure to the real byte offset as per the specification of that
4628 * for example dst address is 3 words in ethertype header and corresponding
4629 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4630 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4631 * matching entry describing its field. This needs to be updated if new
4632 * structure is added to that union.
/* Each entry lists the byte offset, within the protocol header, of each
 * consecutive 16-bit match word. Tunnel headers (VXLAN/GENEVE/VXLAN-GPE/GTP)
 * start at offset 8 because their first words are not matchable.
 */
4634 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4635 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4636 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4637 { ICE_ETYPE_OL, { 0 } },
4638 { ICE_VLAN_OFOS, { 0, 2 } },
4639 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4640 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4641 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4642 26, 28, 30, 32, 34, 36, 38 } },
4643 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4644 26, 28, 30, 32, 34, 36, 38 } },
4645 { ICE_TCP_IL, { 0, 2 } },
4646 { ICE_UDP_OF, { 0, 2 } },
4647 { ICE_UDP_ILOS, { 0, 2 } },
4648 { ICE_SCTP_IL, { 0, 2 } },
4649 { ICE_VXLAN, { 8, 10, 12, 14 } },
4650 { ICE_GENEVE, { 8, 10, 12, 14 } },
4651 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4652 { ICE_NVGRE, { 0, 2, 4, 6 } },
4653 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4654 { ICE_PPPOE, { 0, 2, 4, 6 } },
4655 { ICE_PROTOCOL_LAST, { 0 } }
4658 /* The following table describes preferred grouping of recipes.
4659 * If a recipe that needs to be programmed is a superset or matches one of the
4660 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID programmed in
 * the extraction sequence. All UDP-based tunnels share ICE_UDP_OF_HW.
 * NOTE(review): ice_fill_valid_words() indexes this table directly by
 * enum ice_protocol_type, so entry order must match the enum — verify when
 * adding entries.
 */
4664 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4665 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4666 { ICE_MAC_IL, ICE_MAC_IL_HW },
4667 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4668 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4669 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4670 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4671 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4672 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4673 { ICE_TCP_IL, ICE_TCP_IL_HW },
4674 { ICE_UDP_OF, ICE_UDP_OF_HW },
4675 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4676 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4677 { ICE_VXLAN, ICE_UDP_OF_HW },
4678 { ICE_GENEVE, ICE_UDP_OF_HW },
4679 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4680 { ICE_NVGRE, ICE_GRE_OF_HW },
4681 { ICE_GTP, ICE_UDP_OF_HW },
4682 { ICE_PPPOE, ICE_PPPOE_HW },
4683 { ICE_PROTOCOL_LAST, 0 }
4687 * ice_find_recp - find a recipe
4688 * @hw: pointer to the hardware structure
4689 * @lkup_exts: extension sequence to match
4691 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4693 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4695 bool refresh_required = true;
4696 struct ice_sw_recipe *recp;
4699 /* Walk through existing recipes to find a match */
4700 recp = hw->switch_info->recp_list;
4701 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4702 /* If recipe was not created for this ID, in SW bookkeeping,
4703 * check if FW has an entry for this recipe. If the FW has an
4704 * entry update it in our SW bookkeeping and continue with the
4707 if (!recp[i].recp_created)
4708 if (ice_get_recp_frm_fw(hw,
4709 hw->switch_info->recp_list, i,
4713 /* Skip inverse action recipes */
4714 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4715 ICE_AQ_RECIPE_ACT_INV_ACT)
4718 /* if number of words we are looking for match */
4719 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4720 struct ice_fv_word *a = lkup_exts->fv_words;
4721 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive set comparison: every requested (prot_id, off)
 * pair must appear somewhere in the candidate recipe's word list.
 */
4725 for (p = 0; p < lkup_exts->n_val_words; p++) {
4726 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4728 if (a[p].off == b[q].off &&
4729 a[p].prot_id == b[q].prot_id)
4730 /* Found the "p"th word in the
4735 /* After walking through all the words in the
4736 * "i"th recipe if "p"th word was not found then
4737 * this recipe is not what we are looking for.
4738 * So break out from this loop and try the next
4741 if (q >= recp[i].lkup_exts.n_val_words) {
4746 /* If for "i"th recipe the found was never set to false
4747 * then it means we found our match
4750 return i; /* Return the recipe ID */
/* No recipe matched the requested extraction sequence. */
4753 return ICE_MAX_NUM_RECIPES;
4757 * ice_prot_type_to_id - get protocol ID from protocol type
4758 * @type: protocol type
4759 * @id: pointer to variable that will receive the ID
4761 * Returns true if found, false otherwise
4763 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of ice_prot_id_tbl, terminated by the ICE_PROTOCOL_LAST
 * sentinel entry.
 */
4767 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4768 if (ice_prot_id_tbl[i].type == type) {
4769 *id = ice_prot_id_tbl[i].protocol_id;
4776 * ice_fill_valid_words - count valid words
4777 * @rule: advanced rule with lookup information
4778 * @lkup_exts: byte offset extractions of the words that are valid
4780 * calculate valid words in a lookup rule using mask value
4783 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4784 struct ice_prot_lkup_ext *lkup_exts)
/* Bail out if the rule's protocol type has no hardware protocol ID. */
4790 if (!ice_prot_type_to_id(rule->type, &prot_id))
4793 word = lkup_exts->n_val_words;
/* Walk the rule's mask 16 bits at a time; every non-zero mask word becomes
 * one extraction entry (protocol ID, header byte offset, mask).
 */
4795 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4796 if (((u16 *)&rule->m_u)[j] &&
4797 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4798 /* No more space to accommodate */
4799 if (word >= ICE_MAX_CHAIN_WORDS)
4801 lkup_exts->fv_words[word].off =
4802 ice_prot_ext[rule->type].offs[j];
/* NOTE(review): prot_id from ice_prot_type_to_id() is only used for
 * validation; the stored ID is re-read via ice_prot_id_tbl[rule->type],
 * which is correct only while that table's order matches the enum —
 * confirm, or use prot_id here.
 */
4803 lkup_exts->fv_words[word].prot_id =
4804 ice_prot_id_tbl[rule->type].protocol_id;
4805 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return the number of words added by this rule. */
4809 ret_val = word - lkup_exts->n_val_words;
4810 lkup_exts->n_val_words = word;
4816 * ice_create_first_fit_recp_def - Create a recipe grouping
4817 * @hw: pointer to the hardware structure
4818 * @lkup_exts: an array of protocol header extractions
4819 * @rg_list: pointer to a list that stores new recipe groups
4820 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4822 * Using first fit algorithm, take all the words that are still not done
4823 * and start grouping them in 4-word groups. Each group makes up one
4826 static enum ice_status
4827 ice_create_first_fit_recp_def(struct ice_hw *hw,
4828 struct ice_prot_lkup_ext *lkup_exts,
4829 struct LIST_HEAD_TYPE *rg_list,
4832 struct ice_pref_recipe_group *grp = NULL;
4837 /* Walk through every word in the rule to check if it is not done. If so
4838 * then this word needs to be part of a new recipe.
4840 for (j = 0; j < lkup_exts->n_val_words; j++)
4841 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Current group is full (ICE_NUM_WORDS_RECIPE pairs) or none exists
 * yet: allocate a fresh group entry and append it to rg_list.
 */
4843 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4844 struct ice_recp_grp_entry *entry;
4846 entry = (struct ice_recp_grp_entry *)
4847 ice_malloc(hw, sizeof(*entry));
4849 return ICE_ERR_NO_MEMORY;
4850 LIST_ADD(&entry->l_entry, rg_list);
4851 grp = &entry->r_group;
/* Copy this word's protocol/offset pair and mask into the current group. */
4855 grp->pairs[grp->n_val_pairs].prot_id =
4856 lkup_exts->fv_words[j].prot_id;
4857 grp->pairs[grp->n_val_pairs].off =
4858 lkup_exts->fv_words[j].off;
4859 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4867 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4868 * @hw: pointer to the hardware structure
4869 * @fv_list: field vector with the extraction sequence information
4870 * @rg_list: recipe groupings with protocol-offset pairs
4872 * Helper function to fill in the field vector indices for protocol-offset
4873 * pairs. These indexes are then ultimately programmed into a recipe.
4875 static enum ice_status
4876 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4877 struct LIST_HEAD_TYPE *rg_list)
4879 struct ice_sw_fv_list_entry *fv;
4880 struct ice_recp_grp_entry *rg;
4881 struct ice_fv_word *fv_ext;
4883 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; callers are
 * expected to have narrowed fv_list to compatible profiles already.
 */
4886 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4887 fv_ext = fv->fv_ptr->ew;
4889 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4892 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4893 struct ice_fv_word *pr;
4898 pr = &rg->r_group.pairs[i];
4899 mask = rg->r_group.mask[i];
/* Locate the extraction-word slot matching this protocol/offset pair. */
4901 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4902 if (fv_ext[j].prot_id == pr->prot_id &&
4903 fv_ext[j].off == pr->off) {
4906 /* Store index of field vector */
4908 /* Mask is given by caller as big
4909 * endian, but sent to FW as little
/* 16-bit byte swap: convert the mask to the byte order FW expects. */
4912 rg->fv_mask[i] = mask << 8 | mask >> 8;
4916 /* Protocol/offset could not be found, caller gave an
4920 return ICE_ERR_PARAM;
4928 * ice_find_free_recp_res_idx - find free result indexes for recipe
4929 * @hw: pointer to hardware structure
4930 * @profiles: bitmap of profiles that will be associated with the new recipe
4931 * @free_idx: pointer to variable to receive the free index bitmap
4933 * The algorithm used here is:
4934 * 1. When creating a new recipe, create a set P which contains all
4935 * Profiles that will be associated with our new recipe
4937 * 2. For each Profile p in set P:
4938 * a. Add all recipes associated with Profile p into set R
4939 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4940 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4941 * i. Or just assume they all have the same possible indexes:
4943 * i.e., PossibleIndexes = 0x0000F00000000000
4945 * 3. For each Recipe r in set R:
4946 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4947 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4949 * FreeIndexes will contain the bits indicating the indexes free for use,
4950 * then the code needs to update the recipe[r].used_result_idx_bits to
4951 * indicate which indexes were selected for use by this recipe.
4954 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4955 ice_bitmap_t *free_idx)
4957 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4958 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4959 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4963 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
4964 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4965 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
/* NOTE(review): possible_idx is populated twice — by
 * ice_init_possible_res_bm() and again from ICE_POSSIBLE_RES_IDX below;
 * presumably one supersedes the other — confirm which is authoritative.
 */
4966 ice_init_possible_res_bm(possible_idx);
4968 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
4969 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
4970 ice_set_bit(bit, possible_idx);
4972 /* For each profile we are going to associate the recipe with, add the
4973 * recipes that are associated with that profile. This will give us
4974 * the set of recipes that our recipe may collide with.
4977 while (ICE_MAX_NUM_PROFILES >
4978 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4979 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4980 ICE_MAX_NUM_RECIPES);
4985 /* For each recipe that our new recipe may collide with, determine
4986 * which indexes have been used.
4988 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4989 if (ice_is_bit_set(recipes, bit))
4990 ice_or_bitmap(used_idx, used_idx,
4991 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is always a subset of possible). */
4994 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4996 /* return number of free indexes */
4998 while (ICE_MAX_FV_WORDS >
4999 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5008 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5009 * @hw: pointer to hardware structure
5010 * @rm: recipe management list entry
5011 * @match_tun: if field vector index for tunnel needs to be programmed
5012 * @profiles: bitmap of profiles that will be assocated.
5014 static enum ice_status
5015 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5016 bool match_tun, ice_bitmap_t *profiles)
5018 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5019 struct ice_aqc_recipe_data_elem *tmp;
5020 struct ice_aqc_recipe_data_elem *buf;
5021 struct ice_recp_grp_entry *entry;
5022 enum ice_status status;
5028 /* When more than one recipe are required, another recipe is needed to
5029 * chain them together. Matching a tunnel metadata ID takes up one of
5030 * the match fields in the chaining recipe reducing the number of
5031 * chained recipes by one.
5033 /* check number of free result indices */
5034 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5035 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5037 if (rm->n_grp_count > 1) {
5038 if (rm->n_grp_count > free_res_idx)
5039 return ICE_ERR_MAX_LIMIT;
/* tmp holds the template recipes read back from FW; buf is the batch of
 * recipes that will be written with a single add-recipe AQ call.
 */
5044 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5045 ICE_MAX_NUM_RECIPES,
5048 return ICE_ERR_NO_MEMORY;
5050 buf = (struct ice_aqc_recipe_data_elem *)
5051 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5053 status = ICE_ERR_NO_MEMORY;
5057 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5058 recipe_count = ICE_MAX_NUM_RECIPES;
5059 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5061 if (status || recipe_count == 0)
5064 /* Allocate the recipe resources, and configure them according to the
5065 * match fields from protocol headers and extracted field vectors.
5067 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5068 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5071 status = ice_alloc_recipe(hw, &entry->rid);
5075 /* Clear the result index of the located recipe, as this will be
5076 * updated, if needed, later in the recipe creation process.
5078 tmp[0].content.result_indx = 0;
5080 buf[recps] = tmp[0];
5081 buf[recps].recipe_indx = (u8)entry->rid;
5082 /* if the recipe is a non-root recipe RID should be programmed
5083 * as 0 for the rules to be applied correctly.
5085 buf[recps].content.rid = 0;
5086 ice_memset(&buf[recps].content.lkup_indx, 0,
5087 sizeof(buf[recps].content.lkup_indx),
5090 /* All recipes use look-up index 0 to match switch ID. */
5091 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5092 buf[recps].content.mask[0] =
5093 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5094 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5097 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5098 buf[recps].content.lkup_indx[i] = 0x80;
5099 buf[recps].content.mask[i] = 0;
/* Overwrite the ignore slots with the group's real FV indices/masks. */
5102 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5103 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5104 buf[recps].content.mask[i + 1] =
5105 CPU_TO_LE16(entry->fv_mask[i]);
5108 if (rm->n_grp_count > 1) {
5109 /* Checks to see if there really is a valid result index
5112 if (chain_idx >= ICE_MAX_FV_WORDS) {
5113 ice_debug(hw, ICE_DBG_SW,
5114 "No chain index available\n");
5115 status = ICE_ERR_MAX_LIMIT;
/* Reserve a result index for this sub-recipe so the chaining (root)
 * recipe can match on its outcome; then move to the next free index.
 */
5119 entry->chain_idx = chain_idx;
5120 buf[recps].content.result_indx =
5121 ICE_AQ_RECIPE_RESULT_EN |
5122 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5123 ICE_AQ_RECIPE_RESULT_DATA_M);
5124 ice_clear_bit(chain_idx, result_idx_bm);
5125 chain_idx = ice_find_first_bit(result_idx_bm,
5129 /* fill recipe dependencies */
5130 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5131 ICE_MAX_NUM_RECIPES);
5132 ice_set_bit(buf[recps].recipe_indx,
5133 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5134 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is itself the root recipe. */
5138 if (rm->n_grp_count == 1) {
5139 rm->root_rid = buf[0].recipe_indx;
5140 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5141 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5142 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5143 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5144 sizeof(buf[0].recipe_bitmap),
5145 ICE_NONDMA_TO_NONDMA);
5147 status = ICE_ERR_BAD_PTR;
5150 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5151 * the recipe which is getting created if specified
5152 * by user. Usually any advanced switch filter, which results
5153 * into new extraction sequence, ended up creating a new recipe
5154 * of type ROOT and usually recipes are associated with profiles
5155 * Switch rule referreing newly created recipe, needs to have
5156 * either/or 'fwd' or 'join' priority, otherwise switch rule
5157 * evaluation will not happen correctly. In other words, if
5158 * switch rule to be evaluated on priority basis, then recipe
5159 * needs to have priority, otherwise it will be evaluated last.
5161 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: build one extra root recipe that matches on the
 * result indices produced by all the sub-recipes above.
 */
5163 struct ice_recp_grp_entry *last_chain_entry;
5166 /* Allocate the last recipe that will chain the outcomes of the
5167 * other recipes together
5169 status = ice_alloc_recipe(hw, &rid);
5173 buf[recps].recipe_indx = (u8)rid;
5174 buf[recps].content.rid = (u8)rid;
5175 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5176 /* the new entry created should also be part of rg_list to
5177 * make sure we have complete recipe
5179 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5180 sizeof(*last_chain_entry));
5181 if (!last_chain_entry) {
5182 status = ICE_ERR_NO_MEMORY;
5185 last_chain_entry->rid = rid;
5186 ice_memset(&buf[recps].content.lkup_indx, 0,
5187 sizeof(buf[recps].content.lkup_indx),
5189 /* All recipes use look-up index 0 to match switch ID. */
5190 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5191 buf[recps].content.mask[0] =
5192 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5193 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5194 buf[recps].content.lkup_indx[i] =
5195 ICE_AQ_RECIPE_LKUP_IGNORE;
5196 buf[recps].content.mask[i] = 0;
5200 /* update r_bitmap with the recp that is used for chaining */
5201 ice_set_bit(rid, rm->r_bitmap);
5202 /* this is the recipe that chains all the other recipes so it
5203 * should not have a chaining ID to indicate the same
5205 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5206 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5208 last_chain_entry->fv_idx[i] = entry->chain_idx;
5209 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5210 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5211 ice_set_bit(entry->rid, rm->r_bitmap);
5213 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5214 if (sizeof(buf[recps].recipe_bitmap) >=
5215 sizeof(rm->r_bitmap)) {
5216 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5217 sizeof(buf[recps].recipe_bitmap),
5218 ICE_NONDMA_TO_NONDMA);
5220 status = ICE_ERR_BAD_PTR;
5223 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5225 /* To differentiate among different UDP tunnels, a meta data ID
5229 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5230 buf[recps].content.mask[i] =
5231 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5235 rm->root_rid = (u8)rid;
/* Commit the whole recipe batch to FW under the global change lock. */
5237 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5241 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5242 ice_release_change_lock(hw);
5246 /* Every recipe that just got created add it to the recipe
5249 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5250 struct ice_switch_info *sw = hw->switch_info;
5251 bool is_root, idx_found = false;
5252 struct ice_sw_recipe *recp;
5253 u16 idx, buf_idx = 0;
5255 /* find buffer index for copying some data */
5256 for (idx = 0; idx < rm->n_grp_count; idx++)
5257 if (buf[idx].recipe_indx == entry->rid) {
5263 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror what was programmed into SW bookkeeping for this recipe ID. */
5267 recp = &sw->recp_list[entry->rid];
5268 is_root = (rm->root_rid == entry->rid);
5269 recp->is_root = is_root;
5271 recp->root_rid = entry->rid;
5272 recp->big_recp = (is_root && rm->n_grp_count > 1);
5274 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5275 entry->r_group.n_val_pairs *
5276 sizeof(struct ice_fv_word),
5277 ICE_NONDMA_TO_NONDMA);
5279 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5280 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5282 /* Copy non-result fv index values and masks to recipe. This
5283 * call will also update the result recipe bitmask.
5285 ice_collect_result_idx(&buf[buf_idx], recp);
5287 /* for non-root recipes, also copy to the root, this allows
5288 * easier matching of a complete chained recipe
5291 ice_collect_result_idx(&buf[buf_idx],
5292 &sw->recp_list[rm->root_rid]);
5294 recp->n_ext_words = entry->r_group.n_val_pairs;
5295 recp->chain_idx = entry->chain_idx;
5296 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5297 recp->n_grp_count = rm->n_grp_count;
5298 recp->tun_type = rm->tun_type;
5299 recp->recp_created = true;
5314 * ice_create_recipe_group - creates recipe group
5315 * @hw: pointer to hardware structure
5316 * @rm: recipe management list entry
5317 * @lkup_exts: lookup elements
5319 static enum ice_status
5320 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5321 struct ice_prot_lkup_ext *lkup_exts)
5323 enum ice_status status;
5326 rm->n_grp_count = 0;
5328 /* Create recipes for words that are marked not done by packing them
5331 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5332 &rm->rg_list, &recp_count);
/* Cache the extraction words and masks on the recipe-management entry so
 * later stages (FV index fill, bookkeeping) don't need lkup_exts.
 */
5334 rm->n_grp_count += recp_count;
5335 rm->n_ext_words = lkup_exts->n_val_words;
5336 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5337 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5338 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5339 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5346 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5347 * @hw: pointer to hardware structure
5348 * @lkups: lookup elements or match criteria for the advanced recipe, one
5349 * structure per protocol header
5350 * @lkups_cnt: number of protocols
5351 * @bm: bitmap of field vectors to consider
5352 * @fv_list: pointer to a list that holds the returned field vectors
5354 static enum ice_status
5355 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5356 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5358 enum ice_status status;
/* Temporary array mapping each lookup element to its HW protocol ID. */
5362 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5364 return ICE_ERR_NO_MEMORY;
/* Any lookup type without a HW protocol ID makes the request invalid. */
5366 for (i = 0; i < lkups_cnt; i++)
5367 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5368 status = ICE_ERR_CFG;
5372 /* Find field vectors that include all specified protocol types */
5373 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5376 ice_free(hw, prot_ids);
5381 * ice_add_special_words - Add words that are not protocols, such as metadata
5382 * @rinfo: other information regarding the rule e.g. priority and action info
5383 * @lkup_exts: lookup word structure
5385 static enum ice_status
5386 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5387 struct ice_prot_lkup_ext *lkup_exts)
5389 /* If this is a tunneled packet, then add recipe index to match the
5390 * tunnel bit in the packet metadata flags.
5392 if (rinfo->tun_type != ICE_NON_TUN) {
5393 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one extraction word that matches the tunnel-flag bit in
 * the packet metadata (protocol ID ICE_META_DATA_ID_HW).
 */
5394 u8 word = lkup_exts->n_val_words++;
5396 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5397 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5399 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5401 return ICE_ERR_MAX_LIMIT;
5408 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5409 * @hw: pointer to hardware structure
5410 * @rinfo: other information regarding the rule e.g. priority and action info
5411 * @bm: pointer to memory for returning the bitmap of field vectors
5414 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5417 enum ice_prof_type type;
/* Map the rule's tunnel type to a profile class; all UDP-based tunnels
 * (VXLAN/GENEVE/VXLAN-GPE/GTP/plain UDP) share the same profile class.
 */
5419 switch (rinfo->tun_type) {
5421 type = ICE_PROF_NON_TUN;
5423 case ICE_ALL_TUNNELS:
5424 type = ICE_PROF_TUN_ALL;
5426 case ICE_SW_TUN_VXLAN_GPE:
5427 case ICE_SW_TUN_GENEVE:
5428 case ICE_SW_TUN_VXLAN:
5429 case ICE_SW_TUN_UDP:
5430 case ICE_SW_TUN_GTP:
5431 type = ICE_PROF_TUN_UDP;
5433 case ICE_SW_TUN_NVGRE:
5434 type = ICE_PROF_TUN_GRE;
5436 case ICE_SW_TUN_PPPOE:
5437 type = ICE_PROF_TUN_PPPOE;
5439 case ICE_SW_TUN_AND_NON_TUN:
5441 type = ICE_PROF_ALL;
5445 ice_get_sw_fv_bitmap(hw, type, bm);
5449 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5450 * @hw: pointer to hardware structure
5451 * @lkups: lookup elements or match criteria for the advanced recipe, one
5452 * structure per protocol header
5453 * @lkups_cnt: number of protocols
5454 * @rinfo: other information regarding the rule e.g. priority and action info
5455 * @rid: return the recipe ID of the recipe created
5457 static enum ice_status
5458 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5459 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5461 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5462 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5463 struct ice_prot_lkup_ext *lkup_exts;
5464 struct ice_recp_grp_entry *r_entry;
5465 struct ice_sw_fv_list_entry *fvit;
5466 struct ice_recp_grp_entry *r_tmp;
5467 struct ice_sw_fv_list_entry *tmp;
5468 enum ice_status status = ICE_SUCCESS;
5469 struct ice_sw_recipe *rm;
5470 bool match_tun = false;
5474 return ICE_ERR_PARAM;
5476 lkup_exts = (struct ice_prot_lkup_ext *)
5477 ice_malloc(hw, sizeof(*lkup_exts));
5479 return ICE_ERR_NO_MEMORY;
5481 /* Determine the number of words to be matched and if it exceeds a
5482 * recipe's restrictions
5484 for (i = 0; i < lkups_cnt; i++) {
5487 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5488 status = ICE_ERR_CFG;
5489 goto err_free_lkup_exts;
5492 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5494 status = ICE_ERR_CFG;
5495 goto err_free_lkup_exts;
5499 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5501 status = ICE_ERR_NO_MEMORY;
5502 goto err_free_lkup_exts;
5505 /* Get field vectors that contain fields extracted from all the protocol
5506 * headers being programmed.
5508 INIT_LIST_HEAD(&rm->fv_list);
5509 INIT_LIST_HEAD(&rm->rg_list);
5511 /* Get bitmap of field vectors (profiles) that are compatible with the
5512 * rule request; only these will be searched in the subsequent call to
5515 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5517 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5521 /* Group match words into recipes using preferred recipe grouping
5524 status = ice_create_recipe_group(hw, rm, lkup_exts);
5528 /* There is only profile for UDP tunnels. So, it is necessary to use a
5529 * metadata ID flag to differentiate different tunnel types. A separate
5530 * recipe needs to be used for the metadata.
5532 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5533 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5534 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5537 /* set the recipe priority if specified */
5538 rm->priority = rinfo->priority ? rinfo->priority : 0;
5540 /* Find offsets from the field vector. Pick the first one for all the
5543 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5547 /* get bitmap of all profiles the recipe will be associated with */
5548 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5549 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5551 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5552 ice_set_bit((u16)fvit->profile_id, profiles);
5555 /* Create any special protocol/offset pairs, such as looking at tunnel
5556 * bits by extracting metadata
5558 status = ice_add_special_words(rinfo, lkup_exts);
5560 goto err_free_lkup_exts;
5562 /* Look for a recipe which matches our requested fv / mask list */
5563 *rid = ice_find_recp(hw, lkup_exts);
5564 if (*rid < ICE_MAX_NUM_RECIPES)
5565 /* Success if found a recipe that match the existing criteria */
5568 /* Recipe we need does not exist, add a recipe */
5569 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5573 /* Associate all the recipes created with all the profiles in the
5574 * common field vector.
5576 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5578 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write of the FW's recipe-to-profile association: OR in
 * our new recipes, then write it back under the change lock.
 */
5581 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5582 (u8 *)r_bitmap, NULL);
5586 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5587 ICE_MAX_NUM_RECIPES);
5588 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5592 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5595 ice_release_change_lock(hw);
5600 /* Update profile to recipe bitmap array */
5601 ice_memcpy(profile_to_recipe[fvit->profile_id], rm->r_bitmap,
5602 sizeof(rm->r_bitmap), ICE_NONDMA_TO_NONDMA);
5604 /* Update recipe to profile bitmap array */
5605 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5606 if (ice_is_bit_set(rm->r_bitmap, j))
5607 ice_set_bit((u16)fvit->profile_id,
5608 recipe_to_profile[j]);
5611 *rid = rm->root_rid;
5612 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5613 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Common cleanup: free the recipe-group entries and FV list built above. */
5615 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5616 ice_recp_grp_entry, l_entry) {
5617 LIST_DEL(&r_entry->l_entry);
5618 ice_free(hw, r_entry);
5621 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5623 LIST_DEL(&fvit->list_entry);
5628 ice_free(hw, rm->root_buf);
5633 ice_free(hw, lkup_exts);
5639 * ice_find_dummy_packet - find dummy packet by tunnel type
5641 * @lkups: lookup elements or match criteria for the advanced recipe, one
5642 * structure per protocol header
5643 * @lkups_cnt: number of protocols
5644 * @tun_type: tunnel type from the match criteria
5645 * @pkt: dummy packet to fill according to filter match criteria
5646 * @pkt_len: packet length of dummy packet
5647 * @offsets: pointer to receive the pointer to the offsets for the packet
5650 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5651 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5653 const struct ice_dummy_pkt_offsets **offsets)
5655 bool tcp = false, udp = false, ipv6 = false;
/* GTP and PPPoE have fixed dummy packets regardless of L3/L4 lookups. */
5658 if (tun_type == ICE_SW_TUN_GTP) {
5659 *pkt = dummy_udp_gtp_packet;
5660 *pkt_len = sizeof(dummy_udp_gtp_packet);
5661 *offsets = dummy_udp_gtp_packet_offsets;
5664 if (tun_type == ICE_SW_TUN_PPPOE) {
5665 *pkt = dummy_pppoe_packet;
5666 *pkt_len = sizeof(dummy_pppoe_packet);
5667 *offsets = dummy_pppoe_packet_offsets;
/* Scan the lookups once to learn which L3/L4 protocols the rule matches;
 * these flags select the dummy packet variant below.
 */
5670 for (i = 0; i < lkups_cnt; i++) {
5671 if (lkups[i].type == ICE_UDP_ILOS)
5673 else if (lkups[i].type == ICE_TCP_IL)
5675 else if (lkups[i].type == ICE_IPV6_OFOS)
5679 if (tun_type == ICE_ALL_TUNNELS) {
5680 *pkt = dummy_gre_udp_packet;
5681 *pkt_len = sizeof(dummy_gre_udp_packet);
5682 *offsets = dummy_gre_udp_packet_offsets;
5686 if (tun_type == ICE_SW_TUN_NVGRE) {
5688 *pkt = dummy_gre_tcp_packet;
5689 *pkt_len = sizeof(dummy_gre_tcp_packet);
5690 *offsets = dummy_gre_tcp_packet_offsets;
5694 *pkt = dummy_gre_udp_packet;
5695 *pkt_len = sizeof(dummy_gre_udp_packet);
5696 *offsets = dummy_gre_udp_packet_offsets;
5700 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5701 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5703 *pkt = dummy_udp_tun_tcp_packet;
5704 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5705 *offsets = dummy_udp_tun_tcp_packet_offsets;
5709 *pkt = dummy_udp_tun_udp_packet;
5710 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5711 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled: choose by L4 protocol and IP version; TCP is the default
 * when nothing more specific matched.
 */
5716 *pkt = dummy_udp_packet;
5717 *pkt_len = sizeof(dummy_udp_packet);
5718 *offsets = dummy_udp_packet_offsets;
5720 } else if (udp && ipv6) {
5721 *pkt = dummy_udp_ipv6_packet;
5722 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5723 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" reduces to just "ipv6". */
5725 } else if ((tcp && ipv6) || ipv6) {
5726 *pkt = dummy_tcp_ipv6_packet;
5727 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5728 *offsets = dummy_tcp_ipv6_packet_offsets;
5732 *pkt = dummy_tcp_packet;
5733 *pkt_len = sizeof(dummy_tcp_packet);
5734 *offsets = dummy_tcp_packet_offsets;
5738 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5740 * @lkups: lookup elements or match criteria for the advanced recipe, one
5741 * structure per protocol header
5742 * @lkups_cnt: number of protocols
5743 * @s_rule: stores rule information from the match criteria
5744 * @dummy_pkt: dummy packet to fill according to filter match criteria
5745 * @pkt_len: packet length of dummy packet
5746 * @offsets: offset info for the dummy packet
 *
 * Returns ICE_ERR_PARAM when a lookup type cannot be located in @offsets
 * or is not a supported protocol header (success path not visible in this
 * extraction — presumably ICE_SUCCESS; TODO confirm).
5748 static enum ice_status
5749 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5750 struct ice_aqc_sw_rules_elem *s_rule,
5751 const u8 *dummy_pkt, u16 pkt_len,
5752 const struct ice_dummy_pkt_offsets *offsets)
5757 /* Start with a packet with a pre-defined/dummy content. Then, fill
5758 * in the header values to be looked up or matched.
5760 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5762 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5764 for (i = 0; i < lkups_cnt; i++) {
5765 enum ice_protocol_type type;
5766 u16 offset = 0, len = 0, j;
5769 /* find the start of this layer; it should be found since this
5770 * was already checked when search for the dummy packet
5772 type = lkups[i].type;
5773 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5774 if (type == offsets[j].type) {
5775 offset = offsets[j].offset;
5780 /* this should never happen in a correct calling sequence */
5782 return ICE_ERR_PARAM;
/* Map the lookup type to the on-wire header length so we know how
 * many 16-bit words of the dummy packet may be patched below.
 */
5784 switch (lkups[i].type) {
5787 len = sizeof(struct ice_ether_hdr);
5790 len = sizeof(struct ice_ethtype_hdr);
5793 len = sizeof(struct ice_vlan_hdr);
5797 len = sizeof(struct ice_ipv4_hdr);
5801 len = sizeof(struct ice_ipv6_hdr);
5806 len = sizeof(struct ice_l4_hdr);
5809 len = sizeof(struct ice_sctp_hdr);
5812 len = sizeof(struct ice_nvgre);
5817 len = sizeof(struct ice_udp_tnl_hdr);
5821 len = sizeof(struct ice_udp_gtp_hdr);
5824 len = sizeof(struct ice_pppoe_hdr);
5827 return ICE_ERR_PARAM;
5830 /* the length should be a word multiple */
5831 if (len % ICE_BYTES_PER_WORD)
5834 /* We have the offset to the header start, the length, the
5835 * caller's header values and mask. Use this information to
5836 * copy the data into the dummy packet appropriately based on
5837 * the mask. Note that we need to only write the bits as
5838 * indicated by the mask to make sure we don't improperly write
5839 * over any significant packet data.
 *
 * For each 16-bit word with a non-zero mask: keep the dummy
 * packet's bits outside the mask, take the caller's header
 * bits inside the mask.
5841 for (j = 0; j < len / sizeof(u16); j++)
5842 if (((u16 *)&lkups[i].m_u)[j])
5843 ((u16 *)(pkt + offset))[j] =
5844 (((u16 *)(pkt + offset))[j] &
5845 ~((u16 *)&lkups[i].m_u)[j]) |
5846 (((u16 *)&lkups[i].h_u)[j] &
5847 ((u16 *)&lkups[i].m_u)[j]);
5850 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5856 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5857 * @hw: pointer to the hardware structure
5858 * @tun_type: tunnel type
5859 * @pkt: dummy packet to fill in
5860 * @offsets: offset info for the dummy packet
5862 static enum ice_status
5863 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5864 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Resolve the currently-open tunnel UDP port for the tunnel family. */
5869 case ICE_SW_TUN_AND_NON_TUN:
5870 case ICE_SW_TUN_VXLAN_GPE:
5871 case ICE_SW_TUN_VXLAN:
5872 case ICE_SW_TUN_UDP:
5873 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5877 case ICE_SW_TUN_GENEVE:
5878 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5883 /* Nothing needs to be done for this tunnel type */
5887 /* Find the outer UDP protocol header and insert the port number */
5888 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5889 if (offsets[i].type == ICE_UDP_OF) {
5890 struct ice_l4_hdr *hdr;
5893 offset = offsets[i].offset;
5894 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* The shift pair byte-swaps the 16-bit port into on-wire
 * (big-endian) order — NOTE(review): assumes a little-endian
 * host, consistent with the CPU_TO_LE16 usage elsewhere in
 * this file.
 */
5895 hdr->dst_port = open_port << 8 | open_port >> 8;
5905 * ice_find_adv_rule_entry - Search a rule entry
5906 * @hw: pointer to the hardware structure
5907 * @lkups: lookup elements or match criteria for the advanced recipe, one
5908 * structure per protocol header
5909 * @lkups_cnt: number of protocols
5910 * @recp_id: recipe ID for which we are finding the rule
5911 * @rinfo: other information regarding the rule e.g. priority and action info
5913 * Helper function to search for a given advance rule entry
5914 * Returns pointer to entry storing the rule if found
5916 static struct ice_adv_fltr_mgmt_list_entry *
5917 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5918 u16 lkups_cnt, u8 recp_id,
5919 struct ice_adv_rule_info *rinfo)
5921 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5922 struct ice_switch_info *sw = hw->switch_info;
/* Walk every stored rule of this recipe; a hit requires the same lookup
 * count, byte-identical lookup entries, and matching action flag and
 * tunnel type.
 */
5925 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5926 ice_adv_fltr_mgmt_list_entry, list_entry) {
5927 bool lkups_matched = true;
5929 if (lkups_cnt != list_itr->lkups_cnt)
5931 for (i = 0; i < list_itr->lkups_cnt; i++)
5932 if (memcmp(&list_itr->lkups[i], &lkups[i],
5934 lkups_matched = false;
5937 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5938 rinfo->tun_type == list_itr->rule_info.tun_type &&
5946 * ice_adv_add_update_vsi_list
5947 * @hw: pointer to the hardware structure
5948 * @m_entry: pointer to current adv filter management list entry
5949 * @cur_fltr: filter information from the book keeping entry
5950 * @new_fltr: filter information with the new VSI to be added
5952 * Call AQ command to add or update previously created VSI list with new VSI.
5954 * Helper function to do book keeping associated with adding filter information
5955 * The algorithm to do the booking keeping is described below :
5956 * When a VSI needs to subscribe to a given advanced filter
5957 * if only one VSI has been added till now
5958 * Allocate a new VSI list and add two VSIs
5959 * to this list using switch rule command
5960 * Update the previously created switch rule with the
5961 * newly created VSI list ID
5962 * if a VSI list was previously created
5963 * Add the new VSI to the previously created VSI list set
5964 * using the update switch rule command
5966 static enum ice_status
5967 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5968 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5969 struct ice_adv_rule_info *cur_fltr,
5970 struct ice_adv_rule_info *new_fltr)
5972 enum ice_status status;
5973 u16 vsi_list_id = 0;
/* Queue-directed rules cannot be shared across VSIs via a VSI list. */
5975 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5976 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5977 return ICE_ERR_NOT_IMPL;
/* A drop rule is VSI-agnostic: adding a second drop subscriber is a
 * duplicate, not a list update.
 */
5979 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5980 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5981 return ICE_ERR_ALREADY_EXISTS;
5983 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5984 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5985 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5986 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5987 return ICE_ERR_NOT_IMPL;
5989 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5990 /* Only one entry existed in the mapping and it was not already
5991 * a part of a VSI list. So, create a VSI list with the old and
5994 struct ice_fltr_info tmp_fltr;
5995 u16 vsi_handle_arr[2];
5997 /* A rule already exists with the new VSI being added */
5998 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5999 new_fltr->sw_act.fwd_id.hw_vsi_id)
6000 return ICE_ERR_ALREADY_EXISTS;
6002 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6003 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6004 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6010 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6011 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6012 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6013 /* Update the previous switch rule of "forward to VSI" to
6016 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new list in both the rule info and the bookkeeping
 * map so later subscribers take the "update existing list" path.
 */
6020 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6021 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6022 m_entry->vsi_list_info =
6023 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6026 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6028 if (!m_entry->vsi_list_info)
6031 /* A rule already exists with the new VSI being added */
6032 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6035 /* Update the previously created VSI list set with
6036 * the new VSI ID passed in
6038 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6040 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6042 ice_aqc_opc_update_sw_rules,
6044 /* update VSI list mapping info with new VSI ID */
6046 ice_set_bit(vsi_handle,
6047 m_entry->vsi_list_info->vsi_map);
6050 m_entry->vsi_count++;
6055 * ice_add_adv_rule - helper function to create an advanced switch rule
6056 * @hw: pointer to the hardware structure
6057 * @lkups: information on the words that needs to be looked up. All words
6058 * together makes one recipe
6059 * @lkups_cnt: num of entries in the lkups array
6060 * @rinfo: other information related to the rule that needs to be programmed
6061 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6062 * ignored is case of error.
6064 * This function can program only 1 rule at a time. The lkups is used to
6065 * describe the all the words that forms the "lookup" portion of the recipe.
6066 * These words can span multiple protocols. Callers to this function need to
6067 * pass in a list of protocol headers with lookup information along and mask
6068 * that determines which words are valid from the given protocol header.
6069 * rinfo describes other information related to this rule such as forwarding
6070 * IDs, priority of this rule, etc.
6073 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6074 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6075 struct ice_rule_query_data *added_entry)
6077 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6078 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6079 const struct ice_dummy_pkt_offsets *pkt_offsets;
6080 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6081 struct LIST_HEAD_TYPE *rule_head;
6082 struct ice_switch_info *sw;
6083 enum ice_status status;
6084 const u8 *pkt = NULL;
6090 return ICE_ERR_PARAM;
6092 /* get # of words we need to match */
/* Count the non-zero 16-bit mask words across all lookups; the recipe
 * must match at least one word and no more than the HW chain limit.
 */
6094 for (i = 0; i < lkups_cnt; i++) {
6097 ptr = (u16 *)&lkups[i].m_u;
6098 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6102 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6103 return ICE_ERR_PARAM;
6105 /* make sure that we can locate a dummy packet */
6106 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6109 status = ICE_ERR_PARAM;
6110 goto err_ice_add_adv_rule;
/* Only VSI/queue/queue-group forwarding and drop actions are valid. */
6113 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6114 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6115 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6116 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6119 vsi_handle = rinfo->sw_act.vsi_handle;
6120 if (!ice_is_vsi_valid(hw, vsi_handle))
6121 return ICE_ERR_PARAM;
6123 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6124 rinfo->sw_act.fwd_id.hw_vsi_id =
6125 ice_get_hw_vsi_num(hw, vsi_handle);
6126 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6127 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or reuse) the recipe that matches this set of lookups. */
6129 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6132 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6134 /* we have to add VSI to VSI_LIST and increment vsi_count.
6135 * Also Update VSI list so that we can change forwarding rule
6136 * if the rule already exists, we will check if it exists with
6137 * same vsi_id, if not then add it to the VSI list if it already
6138 * exists if not then create a VSI list and add the existing VSI
6139 * ID and the new VSI ID to the list
6140 * We will add that VSI to the list
6142 status = ice_adv_add_update_vsi_list(hw, m_entry,
6143 &m_entry->rule_info,
6146 added_entry->rid = rid;
6147 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6148 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule matched: build a fresh lookup rx/tx switch rule
 * buffer sized for the header-less rule plus the dummy packet.
 */
6152 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6153 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6155 return ICE_ERR_NO_MEMORY;
6156 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the caller's action into the single-action word. */
6157 switch (rinfo->sw_act.fltr_act) {
6158 case ICE_FWD_TO_VSI:
6159 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6160 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6161 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6164 act |= ICE_SINGLE_ACT_TO_Q;
6165 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6166 ICE_SINGLE_ACT_Q_INDEX_M;
6168 case ICE_FWD_TO_QGRP:
/* Queue region size is programmed as log2 of the group size. */
6169 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6170 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6171 act |= ICE_SINGLE_ACT_TO_Q;
6172 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6173 ICE_SINGLE_ACT_Q_INDEX_M;
6174 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6175 ICE_SINGLE_ACT_Q_REGION_M;
6177 case ICE_DROP_PACKET:
6178 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6179 ICE_SINGLE_ACT_VALID_BIT;
6182 status = ICE_ERR_CFG;
6183 goto err_ice_add_adv_rule;
6186 /* set the rule LOOKUP type based on caller specified 'RX'
6187 * instead of hardcoding it to be either LOOKUP_TX/RX
6189 * for 'RX' set the source to be the port number
6190 * for 'TX' set the source to be the source HW VSI number (determined
6194 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6195 s_rule->pdata.lkup_tx_rx.src =
6196 CPU_TO_LE16(hw->port_info->lport);
6198 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6199 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6202 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6203 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Patch the dummy packet with the caller's masked header values. */
6205 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6206 pkt_len, pkt_offsets);
6208 goto err_ice_add_adv_rule;
6210 if (rinfo->tun_type != ICE_NON_TUN) {
6211 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6212 s_rule->pdata.lkup_tx_rx.hdr,
6215 goto err_ice_add_adv_rule;
/* Program the rule into HW via the admin queue. */
6218 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6219 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6222 goto err_ice_add_adv_rule;
/* Build the bookkeeping entry: a deep copy of the lookups plus the
 * rule info, recorded on this recipe's filter-rules list.
 */
6223 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6224 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6226 status = ICE_ERR_NO_MEMORY;
6227 goto err_ice_add_adv_rule;
6230 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6231 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6232 ICE_NONDMA_TO_NONDMA);
6233 if (!adv_fltr->lkups) {
6234 status = ICE_ERR_NO_MEMORY;
6235 goto err_ice_add_adv_rule;
6238 adv_fltr->lkups_cnt = lkups_cnt;
6239 adv_fltr->rule_info = *rinfo;
6240 adv_fltr->rule_info.fltr_rule_id =
6241 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6242 sw = hw->switch_info;
6243 sw->recp_list[rid].adv_rule = true;
6244 rule_head = &sw->recp_list[rid].filt_rules;
6246 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6247 struct ice_fltr_info tmp_fltr;
6249 tmp_fltr.fltr_rule_id =
6250 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6251 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6252 tmp_fltr.fwd_id.hw_vsi_id =
6253 ice_get_hw_vsi_num(hw, vsi_handle);
6254 tmp_fltr.vsi_handle = vsi_handle;
6255 /* Update the previous switch rule of "forward to VSI" to
6258 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6260 goto err_ice_add_adv_rule;
6261 adv_fltr->vsi_count = 1;
6264 /* Add rule entry to book keeping list */
6265 LIST_ADD(&adv_fltr->list_entry, rule_head);
6267 added_entry->rid = rid;
6268 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6269 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Common error/exit path: on failure free the partially built
 * bookkeeping entry; the rule buffer is freed unconditionally.
 */
6271 err_ice_add_adv_rule:
6272 if (status && adv_fltr) {
6273 ice_free(hw, adv_fltr->lkups);
6274 ice_free(hw, adv_fltr);
6277 ice_free(hw, s_rule);
6283 * ice_adv_rem_update_vsi_list
6284 * @hw: pointer to the hardware structure
6285 * @vsi_handle: VSI handle of the VSI to remove
6286 * @fm_list: filter management entry for which the VSI list management needs to
 *
 * Removes @vsi_handle from the rule's VSI list. When only one subscriber
 * remains afterwards, the rule is converted back to a plain "forward to
 * VSI" rule and the now-unneeded VSI list is deleted.
6289 static enum ice_status
6290 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6291 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6293 struct ice_vsi_list_map_info *vsi_list_info;
6294 enum ice_sw_lkup_type lkup_type;
6295 enum ice_status status;
6298 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6299 fm_list->vsi_count == 0)
6300 return ICE_ERR_PARAM;
6302 /* A rule with the VSI being removed does not exist */
6303 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6304 return ICE_ERR_DOES_NOT_EXIST;
6306 lkup_type = ICE_SW_LKUP_LAST;
6307 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Ask FW to drop this VSI from the list (remove flag = true). */
6308 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6309 ice_aqc_opc_update_sw_rules,
6314 fm_list->vsi_count--;
6315 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6316 vsi_list_info = fm_list->vsi_list_info;
6317 if (fm_list->vsi_count == 1) {
6318 struct ice_fltr_info tmp_fltr;
/* One subscriber left: find it and collapse the list rule into a
 * direct forward-to-VSI rule for that remaining VSI.
 */
6321 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6323 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6324 return ICE_ERR_OUT_OF_RANGE;
6326 /* Make sure VSI list is empty before removing it below */
6327 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6329 ice_aqc_opc_update_sw_rules,
6333 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6334 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6335 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6336 tmp_fltr.fwd_id.hw_vsi_id =
6337 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6338 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6339 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6341 /* Update the previous switch rule of "MAC forward to VSI" to
6342 * "MAC fwd to VSI list"
6344 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6346 ice_debug(hw, ICE_DBG_SW,
6347 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6348 tmp_fltr.fwd_id.hw_vsi_id, status);
6352 /* Remove the VSI list since it is no longer used */
6353 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6355 ice_debug(hw, ICE_DBG_SW,
6356 "Failed to remove VSI list %d, error %d\n",
6357 vsi_list_id, status);
/* Drop the bookkeeping map entry for the deleted VSI list. */
6361 LIST_DEL(&vsi_list_info->list_entry);
6362 ice_free(hw, vsi_list_info);
6363 fm_list->vsi_list_info = NULL;
6370 * ice_rem_adv_rule - removes existing advanced switch rule
6371 * @hw: pointer to the hardware structure
6372 * @lkups: information on the words that needs to be looked up. All words
6373 * together makes one recipe
6374 * @lkups_cnt: num of entries in the lkups array
6375 * @rinfo: Its the pointer to the rule information for the rule
6377 * This function can be used to remove 1 rule at a time. The lkups is
6378 * used to describe all the words that forms the "lookup" portion of the
6379 * rule. These words can span multiple protocols. Callers to this function
6380 * need to pass in a list of protocol headers with lookup information along
6381 * and mask that determines which words are valid from the given protocol
6382 * header. rinfo describes other information related to this rule such as
6383 * forwarding IDs, priority of this rule, etc.
6386 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6387 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6389 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6390 struct ice_prot_lkup_ext lkup_exts;
6391 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6392 enum ice_status status = ICE_SUCCESS;
6393 bool remove_rule = false;
6394 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words from the caller's
 * lookups so the owning recipe can be located.
 */
6396 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6397 for (i = 0; i < lkups_cnt; i++) {
6400 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6403 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6408 /* Create any special protocol/offset pairs, such as looking at tunnel
6409 * bits by extracting metadata
6411 status = ice_add_special_words(rinfo, &lkup_exts);
6415 rid = ice_find_recp(hw, &lkup_exts);
6416 /* If did not find a recipe that match the existing criteria */
6417 if (rid == ICE_MAX_NUM_RECIPES)
6418 return ICE_ERR_PARAM;
6420 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6421 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6422 /* the rule is already removed */
/* Under the rule-list lock: either the rule forwards to a single
 * target (remove it outright) or it forwards to a VSI list (detach
 * this VSI and only remove the rule when no subscriber remains).
 */
6425 ice_acquire_lock(rule_lock);
6426 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6428 } else if (list_elem->vsi_count > 1) {
6429 list_elem->vsi_list_info->ref_cnt--;
6430 remove_rule = false;
6431 vsi_handle = rinfo->sw_act.vsi_handle;
6432 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6434 vsi_handle = rinfo->sw_act.vsi_handle;
6435 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6437 ice_release_lock(rule_lock);
6440 if (list_elem->vsi_count == 0)
6443 ice_release_lock(rule_lock);
6445 struct ice_aqc_sw_rules_elem *s_rule;
/* Tell FW to delete the rule: a header-less lkup_tx_rx element
 * carrying only the rule index is sufficient for removal.
 */
6448 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6450 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6453 return ICE_ERR_NO_MEMORY;
6454 s_rule->pdata.lkup_tx_rx.act = 0;
6455 s_rule->pdata.lkup_tx_rx.index =
6456 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6457 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6458 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6460 ice_aqc_opc_remove_sw_rules, NULL);
6461 if (status == ICE_SUCCESS) {
/* FW removal succeeded: drop the bookkeeping entry too. */
6462 ice_acquire_lock(rule_lock);
6463 LIST_DEL(&list_elem->list_entry);
6464 ice_free(hw, list_elem->lkups);
6465 ice_free(hw, list_elem);
6466 ice_release_lock(rule_lock);
6468 ice_free(hw, s_rule);
6474 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6475 * @hw: pointer to the hardware structure
6476 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6478 * This function is used to remove 1 rule at a time. The removal is based on
6479 * the remove_entry parameter. This function will remove rule for a given
6480 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 *
 * Returns ICE_ERR_PARAM when the recipe was never created or no rule with
 * the requested rule_id exists on that recipe's list.
6483 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6484 struct ice_rule_query_data *remove_entry)
6486 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6487 struct LIST_HEAD_TYPE *list_head;
6488 struct ice_adv_rule_info rinfo;
6489 struct ice_switch_info *sw;
6491 sw = hw->switch_info;
6492 if (!sw->recp_list[remove_entry->rid].recp_created)
6493 return ICE_ERR_PARAM;
6494 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Locate the bookkeeping entry by rule ID, then delegate the actual
 * removal (copying rule_info so the caller's VSI handle is used).
 */
6495 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6497 if (list_itr->rule_info.fltr_rule_id ==
6498 remove_entry->rule_id) {
6499 rinfo = list_itr->rule_info;
6500 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6501 return ice_rem_adv_rule(hw, list_itr->lkups,
6502 list_itr->lkups_cnt, &rinfo);
6505 return ICE_ERR_PARAM;
6509 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6511 * @hw: pointer to the hardware structure
6512 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6514 * This function is used to remove all the rules for a given VSI and as soon
6515 * as removing a rule fails, it will return immediately with the error code,
6516 * else it will return ICE_SUCCESS
6519 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6521 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6522 struct ice_vsi_list_map_info *map_info;
6523 struct LIST_HEAD_TYPE *list_head;
6524 struct ice_adv_rule_info rinfo;
6525 struct ice_switch_info *sw;
6526 enum ice_status status;
6527 u16 vsi_list_id = 0;
/* Walk every created recipe that holds advanced rules and remove each
 * rule this VSI subscribes to.
 */
6530 sw = hw->switch_info;
6531 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6532 if (!sw->recp_list[rid].recp_created)
6534 if (!sw->recp_list[rid].adv_rule)
6536 list_head = &sw->recp_list[rid].filt_rules;
6538 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6539 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* map_info identifies whether this VSI is on the rule's
 * VSI list (presumed skip when not found — the check is in
 * the missing lines; TODO confirm).
 */
6540 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6544 rinfo = list_itr->rule_info;
6545 rinfo.sw_act.vsi_handle = vsi_handle;
6546 status = ice_rem_adv_rule(hw, list_itr->lkups,
6547 list_itr->lkups_cnt, &rinfo);
6557 * ice_replay_fltr - Replay all the filters stored by a specific list head
6558 * @hw: pointer to the hardware structure
6559 * @list_head: list for which filters needs to be replayed
6560 * @recp_id: Recipe ID for which rules need to be replayed
6562 static enum ice_status
6563 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6565 struct ice_fltr_mgmt_list_entry *itr;
6566 struct LIST_HEAD_TYPE l_head;
6567 enum ice_status status = ICE_SUCCESS;
6569 if (LIST_EMPTY(list_head))
6572 /* Move entries from the given list_head to a temporary l_head so that
6573 * they can be replayed. Otherwise when trying to re-add the same
6574 * filter, the function will return already exists
6576 LIST_REPLACE_INIT(list_head, &l_head);
6578 /* Mark the given list_head empty by reinitializing it so filters
6579 * could be added again by *handler
6581 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6583 struct ice_fltr_list_entry f_entry;
6585 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly; otherwise replay
 * one filter per VSI recorded in the entry's vsi_map below.
 */
6586 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6587 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6588 if (status != ICE_SUCCESS)
6593 /* Add a filter per VSI separately */
6598 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6600 if (!ice_is_vsi_valid(hw, vsi_handle))
6603 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6604 f_entry.fltr_info.vsi_handle = vsi_handle;
6605 f_entry.fltr_info.fwd_id.hw_vsi_id =
6606 ice_get_hw_vsi_num(hw, vsi_handle);
6607 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6608 if (recp_id == ICE_SW_LKUP_VLAN)
6609 status = ice_add_vlan_internal(hw, &f_entry);
6611 status = ice_add_rule_internal(hw, recp_id,
6613 if (status != ICE_SUCCESS)
6618 /* Clear the filter management list */
6619 ice_rem_sw_rule_info(hw, &l_head);
6624 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6625 * @hw: pointer to the hardware structure
6627 * NOTE: This function does not clean up partially added filters on error.
6628 * It is up to caller of the function to issue a reset or fail early.
6630 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6632 struct ice_switch_info *sw = hw->switch_info;
6633 enum ice_status status = ICE_SUCCESS;
/* Replay each recipe's filter list; stop at the first failure. */
6636 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6637 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6639 status = ice_replay_fltr(hw, i, head);
6640 if (status != ICE_SUCCESS)
6647 * ice_replay_vsi_fltr - Replay filters for requested VSI
6648 * @hw: pointer to the hardware structure
6649 * @vsi_handle: driver VSI handle
6650 * @recp_id: Recipe ID for which rules need to be replayed
6651 * @list_head: list for which filters need to be replayed
6653 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6654 * It is required to pass valid VSI handle.
6656 static enum ice_status
6657 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6658 struct LIST_HEAD_TYPE *list_head)
6660 struct ice_fltr_mgmt_list_entry *itr;
6661 enum ice_status status = ICE_SUCCESS;
6664 if (LIST_EMPTY(list_head))
6666 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6668 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6670 struct ice_fltr_list_entry f_entry;
6672 f_entry.fltr_info = itr->fltr_info;
/* Direct single-VSI, non-VLAN filter owned by this VSI: replay
 * it as-is (fixing up src when it carries the VSI number).
 */
6673 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6674 itr->fltr_info.vsi_handle == vsi_handle) {
6675 /* update the src in case it is VSI num */
6676 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6677 f_entry.fltr_info.src = hw_vsi_id;
6678 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6679 if (status != ICE_SUCCESS)
/* VSI-list case: only replay if this VSI is on the list. */
6683 if (!itr->vsi_list_info ||
6684 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6686 /* Clearing it so that the logic can add it back */
6687 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6688 f_entry.fltr_info.vsi_handle = vsi_handle;
6689 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6690 /* update the src in case it is VSI num */
6691 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6692 f_entry.fltr_info.src = hw_vsi_id;
6693 if (recp_id == ICE_SW_LKUP_VLAN)
6694 status = ice_add_vlan_internal(hw, &f_entry);
6696 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6697 if (status != ICE_SUCCESS)
6705 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6706 * @hw: pointer to the hardware structure
6707 * @vsi_handle: driver VSI handle
6708 * @list_head: list for which filters need to be replayed
6710 * Replay the advanced rule for the given VSI.
6712 static enum ice_status
6713 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6714 struct LIST_HEAD_TYPE *list_head)
6716 struct ice_rule_query_data added_entry = { 0 };
6717 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6718 enum ice_status status = ICE_SUCCESS;
6720 if (LIST_EMPTY(list_head))
/* Re-program each saved advanced rule that belongs to this VSI. */
6722 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6724 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6725 u16 lk_cnt = adv_fltr->lkups_cnt;
6727 if (vsi_handle != rinfo->sw_act.vsi_handle)
6729 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6738 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6739 * @hw: pointer to the hardware structure
6740 * @vsi_handle: driver VSI handle
6742 * Replays filters for requested VSI via vsi_handle.
6744 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6746 struct ice_switch_info *sw = hw->switch_info;
6747 enum ice_status status;
6750 /* Update the recipes that were created */
6751 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6752 struct LIST_HEAD_TYPE *head;
6754 head = &sw->recp_list[i].filt_replay_rules;
/* Advanced-rule recipes replay via the adv path; everything else
 * goes through the basic per-VSI filter replay.
 */
6755 if (!sw->recp_list[i].adv_rule)
6756 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6758 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6759 if (status != ICE_SUCCESS)
6767 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6768 * @hw: pointer to the HW struct
6770 * Deletes the filter replay rules.
6772 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6774 struct ice_switch_info *sw = hw->switch_info;
6780 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6781 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6782 struct LIST_HEAD_TYPE *l_head;
6784 l_head = &sw->recp_list[i].filt_replay_rules;
6785 if (!sw->recp_list[i].adv_rule)
6786 ice_rem_sw_rule_info(hw, l_head);
6788 ice_rem_adv_rule_info(hw, l_head);