1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* NOTE(review): the rest of the dummy_eth_header initializer appears to be
 * elided from this listing (embedded numbering jumps 30 -> 34); confirm the
 * remaining bytes against the upstream file before editing.
 */
/* Byte size of a RX/TX lookup sw-rule element that carries the dummy
 * ethernet header above: header struct minus the flexible pdata member,
 * plus the rule payload and the header bytes (-1 for the 1-byte hdr_data
 * placeholder already counted in ice_sw_rule_lkup_rx_tx).
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Same as above but with no packet header data appended to the rule. */
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Byte size of a large-action sw-rule element holding n action entries. */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Byte size of a VSI-list sw-rule element holding n VSI numbers. */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* ---------------------------------------------------------------------------
 * Dummy (training) packet templates.
 *
 * Each template is a raw packet image used to program switch/tunnel filter
 * rules; the matching *_offsets[] table maps each protocol header type to
 * its byte offset inside the template, terminated by ICE_PROTOCOL_LAST.
 *
 * NOTE(review): this listing has elided lines throughout (the embedded
 * original line numbers are non-contiguous), so some offset entries, array
 * qualifiers (e.g. "static const u8") and closing braces are missing here.
 * Compare against the upstream file before modifying any template.
 * ---------------------------------------------------------------------------
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00,
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
159 { ICE_VXLAN_GPE, 42 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
212 { ICE_VXLAN_GPE, 42 },
215 { ICE_UDP_ILOS, 84 },
216 { ICE_PROTOCOL_LAST, 0 },
220 u8 dummy_udp_tun_udp_packet[] = {
221 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x08, 0x00, /* ICE_ETYPE_OL 12 */
227 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
228 0x00, 0x01, 0x00, 0x00,
229 0x00, 0x11, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
234 0x00, 0x3a, 0x00, 0x00,
236 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
237 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
251 0x00, 0x08, 0x00, 0x00,
255 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
257 { ICE_ETYPE_OL, 12 },
258 { ICE_IPV4_OFOS, 14 },
259 { ICE_UDP_ILOS, 34 },
260 { ICE_PROTOCOL_LAST, 0 },
264 dummy_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x08, 0x00, /* ICE_ETYPE_OL 12 */
271 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
272 0x00, 0x01, 0x00, 0x00,
273 0x00, 0x11, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
278 0x00, 0x08, 0x00, 0x00,
280 0x00, 0x00, /* 2 bytes for 4 byte alignment */
284 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
286 { ICE_ETYPE_OL, 12 },
287 { ICE_IPV4_OFOS, 14 },
289 { ICE_PROTOCOL_LAST, 0 },
293 dummy_tcp_packet[] = {
294 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
295 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00,
298 0x08, 0x00, /* ICE_ETYPE_OL 12 */
300 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
301 0x00, 0x01, 0x00, 0x00,
302 0x00, 0x06, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x50, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, /* 2 bytes for 4 byte alignment */
316 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
318 { ICE_ETYPE_OL, 12 },
319 { ICE_IPV6_OFOS, 14 },
321 { ICE_PROTOCOL_LAST, 0 },
325 dummy_tcp_ipv6_packet[] = {
326 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
332 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
333 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x50, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, /* 2 bytes for 4 byte alignment */
353 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
355 { ICE_ETYPE_OL, 12 },
356 { ICE_IPV6_OFOS, 14 },
357 { ICE_UDP_ILOS, 54 },
358 { ICE_PROTOCOL_LAST, 0 },
362 dummy_udp_ipv6_packet[] = {
363 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
364 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
367 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
381 0x00, 0x08, 0x00, 0x00,
383 0x00, 0x00, /* 2 bytes for 4 byte alignment */
387 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
389 { ICE_IPV4_OFOS, 14 },
392 { ICE_PROTOCOL_LAST, 0 },
396 dummy_udp_gtp_packet[] = {
397 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
402 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x11, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
409 0x00, 0x1c, 0x00, 0x00,
411 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
412 0x00, 0x00, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x85,
415 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
416 0x00, 0x00, 0x00, 0x00,
420 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_VLAN_OFOS, 14},
424 { ICE_PROTOCOL_LAST, 0 },
428 dummy_pppoe_packet[] = {
429 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
436 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 18 */
437 0x00, 0x4e, 0x00, 0x21,
439 0x45, 0x00, 0x00, 0x30, /* PDU */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x11, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, /* 2 bytes for 4 byte alignment */
448 /* this is a recipe to profile association bitmap */
449 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
450 ICE_MAX_NUM_PROFILES);
452 /* this is a profile to recipe association bitmap */
453 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
454 ICE_MAX_NUM_RECIPES);
/* Forward declaration: refreshes both association bitmaps above from FW. */
456 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
459 * ice_collect_result_idx - copy result index values
460 * @buf: buffer that contains the result index
461 * @recp: the recipe struct to copy data into
463 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
464 struct ice_sw_recipe *recp)
/* Record the result index (low bits of result_indx) in the recipe's
 * result-index bitmap, but only when the result-enable flag is set.
 */
466 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
467 ice_set_bit(buf->content.result_indx &
468 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
472 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
473 * @hw: pointer to hardware structure
474 * @recps: struct that we need to populate
475 * @rid: recipe ID that we are populating
476 * @refresh_required: true if we should get recipe to profile mapping from FW
478 * This function is used to populate all the necessary entries into our
479 * bookkeeping so that we have a current list of all the recipes that are
480 * programmed in the firmware.
/* NOTE(review): several statements/braces of this function are elided from
 * this listing (embedded line numbering is non-contiguous, e.g. the NULL
 * check after ice_calloc and the error-path cleanup). Verify against the
 * upstream file before changing control flow here.
 */
482 static enum ice_status
483 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
484 bool *refresh_required)
486 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
487 struct ice_aqc_recipe_data_elem *tmp;
488 u16 num_recps = ICE_MAX_NUM_RECIPES;
489 struct ice_prot_lkup_ext *lkup_exts;
490 u16 i, sub_recps, fv_word_idx = 0;
491 enum ice_status status;
493 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
495 /* we need a buffer big enough to accommodate all the recipes */
496 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
497 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
499 return ICE_ERR_NO_MEMORY;
501 tmp[0].recipe_indx = rid;
502 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
503 /* non-zero status meaning recipe doesn't exist */
507 /* Get recipe to profile map so that we can get the fv from lkups that
508 * we read for a recipe from FW. Since we want to minimize the number of
509 * times we make this FW call, just make one call and cache the copy
510 * until a new recipe is added. This operation is only required the
511 * first time to get the changes from FW. Then to search existing
512 * entries we don't need to update the cache again until another recipe
515 if (*refresh_required) {
516 ice_get_recp_to_prof_map(hw);
517 *refresh_required = false;
520 /* Start populating all the entries for recps[rid] based on lkups from
521 * firmware. Note that we are only creating the root recipe in our
524 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe returned by FW; each iteration fills one
 * ice_recp_grp_entry and contributes protocol/offset words to lkup_exts.
 */
526 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
527 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
528 struct ice_recp_grp_entry *rg_entry;
529 u8 prof, idx, prot = 0;
533 rg_entry = (struct ice_recp_grp_entry *)
534 ice_malloc(hw, sizeof(*rg_entry));
536 status = ICE_ERR_NO_MEMORY;
540 idx = root_bufs.recipe_indx;
541 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
543 /* Mark all result indices in this chain */
544 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
545 ice_set_bit(root_bufs.content.result_indx &
546 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
548 /* get the first profile that is associated with rid */
549 prof = ice_find_first_bit(recipe_to_profile[idx],
550 ICE_MAX_NUM_PROFILES);
551 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
552 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
554 rg_entry->fv_idx[i] = lkup_indx;
555 rg_entry->fv_mask[i] =
556 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
558 /* If the recipe is a chained recipe then all its
559 * child recipe's result will have a result index.
560 * To fill fv_words we should not use those result
561 * index, we only need the protocol ids and offsets.
562 * We will skip all the fv_idx which stores result
563 * index in them. We also need to skip any fv_idx which
564 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
565 * valid offset value.
567 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
568 rg_entry->fv_idx[i]) ||
569 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
570 rg_entry->fv_idx[i] == 0)
573 ice_find_prot_off(hw, ICE_BLK_SW, prof,
574 rg_entry->fv_idx[i], &prot, &off);
575 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
576 lkup_exts->fv_words[fv_word_idx].off = off;
579 /* populate rg_list with the data from the child entry of this
582 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
584 /* Propagate some data to the recipe database */
585 recps[idx].is_root = is_root;
586 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
587 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
588 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
589 recps[idx].chain_idx = root_bufs.content.result_indx &
590 ~ICE_AQ_RECIPE_RESULT_EN;
591 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
593 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
599 /* Only do the following for root recipes entries */
600 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
601 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
602 recps[idx].root_rid = root_bufs.content.rid &
603 ~ICE_AQ_RECIPE_ID_IS_ROOT;
604 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
607 /* Complete initialization of the root recipe entry */
608 lkup_exts->n_val_words = fv_word_idx;
609 recps[rid].big_recp = (num_recps > 1);
610 recps[rid].n_grp_count = num_recps;
611 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
612 ice_memdup(hw, tmp, recps[rid].n_grp_count *
613 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
614 if (!recps[rid].root_buf)
617 /* Copy result indexes */
618 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
619 recps[rid].recp_created = true;
627 * ice_get_recp_to_prof_map - updates recipe to profile mapping
628 * @hw: pointer to hardware structure
630 * This function is used to populate recipe_to_profile matrix where index to
631 * this array is the recipe ID and the element is the mapping of which profiles
632 * is this recipe mapped to.
635 ice_get_recp_to_prof_map(struct ice_hw *hw)
637 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile; each answer both replaces the forward map
 * (profile_to_recipe) and is scattered into the reverse map
 * (recipe_to_profile).
 */
640 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
643 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
644 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
645 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
647 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
648 ICE_MAX_NUM_RECIPES);
649 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
650 if (ice_is_bit_set(r_bitmap, j))
651 ice_set_bit(i, recipe_to_profile[j]);
656 * ice_init_def_sw_recp - initialize the recipe book keeping tables
657 * @hw: pointer to the HW struct
659 * Allocate memory for the entire recipe table and initialize the structures/
660 * entries corresponding to basic recipes.
662 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
664 struct ice_sw_recipe *recps;
667 recps = (struct ice_sw_recipe *)
668 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps))
670 return ICE_ERR_NO_MEMORY;
/* Initialize per-recipe bookkeeping: rule lists, recipe-group list and
 * the per-recipe filter-rule lock.
 */
672 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
673 recps[i].root_rid = i;
674 INIT_LIST_HEAD(&recps[i].filt_rules);
675 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
676 INIT_LIST_HEAD(&recps[i].rg_list);
677 ice_init_lock(&recps[i].filt_rule_lock);
680 hw->switch_info->recp_list = recps;
686 * ice_aq_get_sw_cfg - get switch configuration
687 * @hw: pointer to the hardware structure
688 * @buf: pointer to the result buffer
689 * @buf_size: length of the buffer available for response
690 * @req_desc: pointer to requested descriptor
691 * @num_elems: pointer to number of elements
692 * @cd: pointer to command details structure or NULL
694 * Get switch configuration (0x0200) to be placed in 'buff'.
695 * This admin command returns information such as initial VSI/port number
696 * and switch ID it belongs to.
698 * NOTE: *req_desc is both an input/output parameter.
699 * The caller of this function first calls this function with *request_desc set
700 * to 0. If the response from f/w has *req_desc set to 0, all the switch
701 * configuration information has been returned; if non-zero (meaning not all
702 * the information was returned), the caller should call this function again
703 * with *req_desc set to the previous value returned by f/w to get the
704 * next block of switch configuration information.
706 * *num_elems is output only parameter. This reflects the number of elements
707 * in response buffer. The caller of this function to use *num_elems while
708 * parsing the response buffer.
710 static enum ice_status
711 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
712 u16 buf_size, u16 *req_desc, u16 *num_elems,
713 struct ice_sq_cd *cd)
715 struct ice_aqc_get_sw_cfg *cmd;
716 enum ice_status status;
717 struct ice_aq_desc desc;
719 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
720 cmd = &desc.params.get_sw_conf;
721 cmd->element = CPU_TO_LE16(*req_desc);
723 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand the continuation cookie and element count back to the caller. */
725 *req_desc = LE16_TO_CPU(cmd->element);
726 *num_elems = LE16_TO_CPU(cmd->num_elems);
733 * ice_alloc_sw - allocate resources specific to switch
734 * @hw: pointer to the HW struct
735 * @ena_stats: true to turn on VEB stats
736 * @shared_res: true for shared resource, false for dedicated resource
737 * @sw_id: switch ID returned
738 * @counter_id: VEB counter ID returned
740 * allocates switch resources (SWID and VEB counter) (0x0208)
/* NOTE(review): some lines of this function are elided from this listing
 * (e.g. NULL checks after ice_malloc and the conditional around the VEB
 * counter allocation); confirm the control flow against the upstream file.
 */
743 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
746 struct ice_aqc_alloc_free_res_elem *sw_buf;
747 struct ice_aqc_res_elem *sw_ele;
748 enum ice_status status;
751 buf_len = sizeof(*sw_buf);
752 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
753 ice_malloc(hw, buf_len);
755 return ICE_ERR_NO_MEMORY;
757 /* Prepare buffer for switch ID.
758 * The number of resource entries in buffer is passed as 1 since only a
759 * single switch/VEB instance is allocated, and hence a single sw_id
762 sw_buf->num_elems = CPU_TO_LE16(1);
764 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
765 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
766 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
768 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
769 ice_aqc_opc_alloc_res, NULL);
771 goto ice_alloc_sw_exit;
774 sw_ele = &sw_buf->elem[0];
775 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
778 /* Prepare buffer for VEB Counter */
779 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
780 struct ice_aqc_alloc_free_res_elem *counter_buf;
781 struct ice_aqc_res_elem *counter_ele;
783 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
784 ice_malloc(hw, buf_len);
786 status = ICE_ERR_NO_MEMORY;
787 goto ice_alloc_sw_exit;
790 /* The number of resource entries in buffer is passed as 1 since
791 * only a single switch/VEB instance is allocated, and hence a
792 * single VEB counter is requested.
794 counter_buf->num_elems = CPU_TO_LE16(1);
795 counter_buf->res_type =
796 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
797 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
798 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
802 ice_free(hw, counter_buf);
803 goto ice_alloc_sw_exit;
805 counter_ele = &counter_buf->elem[0];
806 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
807 ice_free(hw, counter_buf);
811 ice_free(hw, sw_buf);
816 * ice_free_sw - free resources specific to switch
817 * @hw: pointer to the HW struct
818 * @sw_id: switch ID returned
819 * @counter_id: VEB counter ID returned
821 * free switch resources (SWID and VEB counter) (0x0209)
823 * NOTE: This function frees multiple resources. It continues
824 * releasing other resources even after it encounters error.
825 * The error code returned is the last error it encountered.
827 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
829 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
830 enum ice_status status, ret_status;
833 buf_len = sizeof(*sw_buf);
834 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
835 ice_malloc(hw, buf_len);
837 return ICE_ERR_NO_MEMORY;
839 /* Prepare buffer to free for switch ID res.
840 * The number of resource entries in buffer is passed as 1 since only a
841 * single switch/VEB instance is freed, and hence a single sw_id
844 sw_buf->num_elems = CPU_TO_LE16(1);
845 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
846 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* First free attempt: the SWID. Its status is kept as ret_status so a
 * later counter-free failure can overwrite it (last error wins).
 */
848 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
849 ice_aqc_opc_free_res, NULL);
852 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
854 /* Prepare buffer to free for VEB Counter resource */
855 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
856 ice_malloc(hw, buf_len);
858 ice_free(hw, sw_buf);
859 return ICE_ERR_NO_MEMORY;
862 /* The number of resource entries in buffer is passed as 1 since only a
863 * single switch/VEB instance is freed, and hence a single VEB counter
866 counter_buf->num_elems = CPU_TO_LE16(1);
867 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
868 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
870 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
871 ice_aqc_opc_free_res, NULL);
873 ice_debug(hw, ICE_DBG_SW,
874 "VEB counter resource could not be freed\n");
878 ice_free(hw, counter_buf);
879 ice_free(hw, sw_buf);
885 * @hw: pointer to the HW struct
886 * @vsi_ctx: pointer to a VSI context struct
887 * @cd: pointer to command details structure or NULL
889 * Add a VSI context to the hardware (0x0210)
892 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
893 struct ice_sq_cd *cd)
895 struct ice_aqc_add_update_free_vsi_resp *res;
896 struct ice_aqc_add_get_update_free_vsi *cmd;
897 struct ice_aq_desc desc;
898 enum ice_status status;
900 cmd = &desc.params.vsi_cmd;
901 res = &desc.params.add_update_free_vsi_res;
903 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* When the VSI number is caller-supplied (not FW-pool allocated), pass it
 * down with the "valid" flag set.
 */
905 if (!vsi_ctx->alloc_from_pool)
906 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
907 ICE_AQ_VSI_IS_VALID);
909 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
911 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
913 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
914 sizeof(vsi_ctx->info), cd);
/* Copy the FW-assigned VSI number and pool usage counters back out. */
917 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
918 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
919 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
927 * @hw: pointer to the HW struct
928 * @vsi_ctx: pointer to a VSI context struct
929 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
930 * @cd: pointer to command details structure or NULL
932 * Free VSI context info from hardware (0x0213)
935 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
936 bool keep_vsi_alloc, struct ice_sq_cd *cd)
938 struct ice_aqc_add_update_free_vsi_resp *resp;
939 struct ice_aqc_add_get_update_free_vsi *cmd;
940 struct ice_aq_desc desc;
941 enum ice_status status;
943 cmd = &desc.params.vsi_cmd;
944 resp = &desc.params.add_update_free_vsi_res;
946 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
948 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the keep_vsi_alloc guard around this flag appears elided
 * from this listing — upstream sets ICE_AQ_VSI_KEEP_ALLOC conditionally.
 */
950 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
952 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
954 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
955 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
963 * @hw: pointer to the HW struct
964 * @vsi_ctx: pointer to a VSI context struct
965 * @cd: pointer to command details structure or NULL
967 * Update VSI context in the hardware (0x0211)
970 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
971 struct ice_sq_cd *cd)
973 struct ice_aqc_add_update_free_vsi_resp *resp;
974 struct ice_aqc_add_get_update_free_vsi *cmd;
975 struct ice_aq_desc desc;
976 enum ice_status status;
978 cmd = &desc.params.vsi_cmd;
979 resp = &desc.params.add_update_free_vsi_res;
981 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
983 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the command carries a read buffer (the VSI info payload). */
985 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
987 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
988 sizeof(vsi_ctx->info), cd);
991 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
992 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
999 * ice_is_vsi_valid - check whether the VSI is valid or not
1000 * @hw: pointer to the HW struct
1001 * @vsi_handle: VSI handle
1003 * check whether the VSI is valid or not
1005 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* A handle is valid when it is in range and a context has been saved. */
1007 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1011 * ice_get_hw_vsi_num - return the HW VSI number
1012 * @hw: pointer to the HW struct
1013 * @vsi_handle: VSI handle
1015 * return the HW VSI number
1016 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
/* No bounds/NULL check here by design — validity is the caller's contract. */
1018 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1020 return hw->vsi_ctx[vsi_handle]->vsi_num;
1024 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1025 * @hw: pointer to the HW struct
1026 * @vsi_handle: VSI handle
1028 * return the VSI context entry for a given VSI handle
/* Returns NULL for out-of-range handles (and for handles never saved). */
1030 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1032 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1036 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1037 * @hw: pointer to the HW struct
1038 * @vsi_handle: VSI handle
1039 * @vsi: VSI context pointer
1041 * save the VSI context entry for a given VSI handle
/* Stores the pointer only; ownership of *vsi stays with the caller until
 * ice_clear_vsi_ctx() releases it.
 */
1044 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1046 hw->vsi_ctx[vsi_handle] = vsi;
1050 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1051 * @hw: pointer to the HW struct
1052 * @vsi_handle: VSI handle
1054 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1056 struct ice_vsi_ctx *vsi;
1059 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free the per-TC LAN queue context array, if one was allocated. */
1062 ice_for_each_traffic_class(i) {
1063 if (vsi->lan_q_ctx[i]) {
1064 ice_free(hw, vsi->lan_q_ctx[i]);
1065 vsi->lan_q_ctx[i] = NULL;
1071 * ice_clear_vsi_ctx - clear the VSI context entry
1072 * @hw: pointer to the HW struct
1073 * @vsi_handle: VSI handle
1075 * clear the VSI context entry
1077 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1079 struct ice_vsi_ctx *vsi;
1081 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then drop the handle's slot. NOTE(review):
 * the free of the context struct itself appears elided from this listing.
 */
1083 ice_clear_vsi_q_ctx(hw, vsi_handle);
1085 hw->vsi_ctx[vsi_handle] = NULL;
1090 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1091 * @hw: pointer to the HW struct
1093 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* Iterate every possible handle; ice_clear_vsi_ctx is a no-op for unset
 * entries (its lookup returns NULL).
 */
1097 for (i = 0; i < ICE_MAX_VSI; i++)
1098 ice_clear_vsi_ctx(hw, i);
1102 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1103 * @hw: pointer to the HW struct
1104 * @vsi_handle: unique VSI handle provided by drivers
1105 * @vsi_ctx: pointer to a VSI context struct
1106 * @cd: pointer to command details structure or NULL
1108 * Add a VSI context to the hardware also add it into the VSI handle list.
1109 * If this function gets called after reset for existing VSIs then update
1110 * with the new HW VSI number in the corresponding VSI handle list entry.
1113 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1114 struct ice_sq_cd *cd)
1116 struct ice_vsi_ctx *tmp_vsi_ctx;
1117 enum ice_status status;
1119 if (vsi_handle >= ICE_MAX_VSI)
1120 return ICE_ERR_PARAM;
1121 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1124 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1126 /* Create a new VSI context */
1127 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1128 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* On allocation failure, undo the FW-side add so HW and SW stay in sync. */
1130 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1131 return ICE_ERR_NO_MEMORY;
1133 *tmp_vsi_ctx = *vsi_ctx;
1135 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1137 /* update with new HW VSI num */
1138 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1145 * ice_free_vsi- free VSI context from hardware and VSI handle list
1146 * @hw: pointer to the HW struct
1147 * @vsi_handle: unique VSI handle
1148 * @vsi_ctx: pointer to a VSI context struct
1149 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1150 * @cd: pointer to command details structure or NULL
1152 * Free VSI context info from hardware as well as from VSI handle list
1155 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1156 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1158 enum ice_status status;
1160 if (!ice_is_vsi_valid(hw, vsi_handle))
1161 return ICE_ERR_PARAM;
/* Resolve the HW VSI number from the handle before talking to FW. */
1162 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1163 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1165 ice_clear_vsi_ctx(hw, vsi_handle);
1171 * @hw: pointer to the HW struct
1172 * @vsi_handle: unique VSI handle
1173 * @vsi_ctx: pointer to a VSI context struct
1174 * @cd: pointer to command details structure or NULL
1176 * Update VSI context in the hardware
1179 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1180 struct ice_sq_cd *cd)
1182 if (!ice_is_vsi_valid(hw, vsi_handle))
1183 return ICE_ERR_PARAM;
/* Translate the driver handle into the HW VSI number, then issue 0x0211. */
1184 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1185 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1189 * ice_aq_get_vsi_params
1190 * @hw: pointer to the HW struct
1191 * @vsi_ctx: pointer to a VSI context struct
1192 * @cd: pointer to command details structure or NULL
1194 * Get VSI context info from hardware (0x0212)
1197 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1198 struct ice_sq_cd *cd)
1200 struct ice_aqc_add_get_update_free_vsi *cmd;
1201 struct ice_aqc_get_vsi_resp *resp;
1202 struct ice_aq_desc desc;
1203 enum ice_status status;
1205 cmd = &desc.params.vsi_cmd;
1206 resp = &desc.params.get_vsi_resp;
1208 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1210 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1212 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1213 sizeof(vsi_ctx->info), cd);
/* Copy the returned VSI number and pool usage counters into the context. */
1215 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1217 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1218 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1225 * ice_aq_add_update_mir_rule - add/update a mirror rule
1226 * @hw: pointer to the HW struct
1227 * @rule_type: Rule Type
1228 * @dest_vsi: VSI number to which packets will be mirrored
1229 * @count: length of the list
1230 * @mr_buf: buffer for list of mirrored VSI numbers
1231 * @cd: pointer to command details structure or NULL
1234 * Add/Update Mirror Rule (0x260).
1237 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1238 u16 count, struct ice_mir_rule_buf *mr_buf,
1239 struct ice_sq_cd *cd, u16 *rule_id)
1241 struct ice_aqc_add_update_mir_rule *cmd;
1242 struct ice_aq_desc desc;
1243 enum ice_status status;
1244 __le16 *mr_list = NULL;
1247 switch (rule_type) {
1248 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1249 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1250 /* Make sure count and mr_buf are set for these rule_types */
1251 if (!(count && mr_buf))
1252 return ICE_ERR_PARAM;
1254 buf_size = count * sizeof(__le16);
1255 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1257 return ICE_ERR_NO_MEMORY;
1259 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1260 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1261 /* Make sure count and mr_buf are not set for these
1264 if (count || mr_buf)
1265 return ICE_ERR_PARAM;
1268 ice_debug(hw, ICE_DBG_SW,
1269 "Error due to unsupported rule_type %u\n", rule_type);
1270 return ICE_ERR_OUT_OF_RANGE;
1273 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1275 /* Pre-process 'mr_buf' items for add/update of virtual port
1276 * ingress/egress mirroring (but not physical port ingress/egress
1282 for (i = 0; i < count; i++) {
1285 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1287 /* Validate specified VSI number, make sure it is less
1288 * than ICE_MAX_VSI, if not return with error.
1290 if (id >= ICE_MAX_VSI) {
1291 ice_debug(hw, ICE_DBG_SW,
1292 "Error VSI index (%u) out-of-range\n",
1294 ice_free(hw, mr_list);
1295 return ICE_ERR_OUT_OF_RANGE;
1298 /* add VSI to mirror rule */
1301 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1302 else /* remove VSI from mirror rule */
1303 mr_list[i] = CPU_TO_LE16(id);
1307 cmd = &desc.params.add_update_rule;
1308 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1309 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1310 ICE_AQC_RULE_ID_VALID_M);
1311 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1312 cmd->num_entries = CPU_TO_LE16(count);
1313 cmd->dest = CPU_TO_LE16(dest_vsi);
1315 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1317 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1319 ice_free(hw, mr_list);
1325 * ice_aq_delete_mir_rule - delete a mirror rule
1326 * @hw: pointer to the HW struct
1327 * @rule_id: Mirror rule ID (to be deleted)
1328 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1329 * otherwise it is returned to the shared pool
1330 * @cd: pointer to command details structure or NULL
1332 * Delete Mirror Rule (0x261).
1335 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1336 struct ice_sq_cd *cd)
1338 struct ice_aqc_delete_mir_rule *cmd;
1339 struct ice_aq_desc desc;
1341 /* rule_id should be in the range 0...63 */
1342 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1343 return ICE_ERR_OUT_OF_RANGE;
1345 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1347 cmd = &desc.params.del_rule;
1348 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1349 cmd->rule_id = CPU_TO_LE16(rule_id);
1352 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1354 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1358 * ice_aq_alloc_free_vsi_list
1359 * @hw: pointer to the HW struct
1360 * @vsi_list_id: VSI list ID returned or used for lookup
1361 * @lkup_type: switch rule filter lookup type
1362 * @opc: switch rules population command type - pass in the command opcode
1364 * allocates or free a VSI list resource
1366 static enum ice_status
1367 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1368 enum ice_sw_lkup_type lkup_type,
1369 enum ice_adminq_opc opc)
1371 struct ice_aqc_alloc_free_res_elem *sw_buf;
1372 struct ice_aqc_res_elem *vsi_ele;
1373 enum ice_status status;
1376 buf_len = sizeof(*sw_buf);
1377 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1378 ice_malloc(hw, buf_len);
1380 return ICE_ERR_NO_MEMORY;
1381 sw_buf->num_elems = CPU_TO_LE16(1);
1383 if (lkup_type == ICE_SW_LKUP_MAC ||
1384 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1385 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1386 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1387 lkup_type == ICE_SW_LKUP_PROMISC ||
1388 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1389 lkup_type == ICE_SW_LKUP_LAST) {
1390 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1391 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1393 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1395 status = ICE_ERR_PARAM;
1396 goto ice_aq_alloc_free_vsi_list_exit;
1399 if (opc == ice_aqc_opc_free_res)
1400 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1402 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1404 goto ice_aq_alloc_free_vsi_list_exit;
1406 if (opc == ice_aqc_opc_alloc_res) {
1407 vsi_ele = &sw_buf->elem[0];
1408 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1411 ice_aq_alloc_free_vsi_list_exit:
1412 ice_free(hw, sw_buf);
1417 * ice_aq_set_storm_ctrl - Sets storm control configuration
1418 * @hw: pointer to the HW struct
1419 * @bcast_thresh: represents the upper threshold for broadcast storm control
1420 * @mcast_thresh: represents the upper threshold for multicast storm control
1421 * @ctl_bitmask: storm control control knobs
1423 * Sets the storm control configuration (0x0280)
1426 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1429 struct ice_aqc_storm_cfg *cmd;
1430 struct ice_aq_desc desc;
1432 cmd = &desc.params.storm_conf;
1434 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1436 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1437 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1438 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1440 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1444 * ice_aq_get_storm_ctrl - gets storm control configuration
1445 * @hw: pointer to the HW struct
1446 * @bcast_thresh: represents the upper threshold for broadcast storm control
1447 * @mcast_thresh: represents the upper threshold for multicast storm control
1448 * @ctl_bitmask: storm control control knobs
1450 * Gets the storm control configuration (0x0281)
1453 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1456 enum ice_status status;
1457 struct ice_aq_desc desc;
1459 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1461 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1463 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1466 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1469 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1472 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1479 * ice_aq_sw_rules - add/update/remove switch rules
1480 * @hw: pointer to the HW struct
1481 * @rule_list: pointer to switch rule population list
1482 * @rule_list_sz: total size of the rule list in bytes
1483 * @num_rules: number of switch rules in the rule_list
1484 * @opc: switch rules population command type - pass in the command opcode
1485 * @cd: pointer to command details structure or NULL
1487 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1489 static enum ice_status
1490 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1491 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1493 struct ice_aq_desc desc;
1495 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1497 if (opc != ice_aqc_opc_add_sw_rules &&
1498 opc != ice_aqc_opc_update_sw_rules &&
1499 opc != ice_aqc_opc_remove_sw_rules)
1500 return ICE_ERR_PARAM;
1502 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1504 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1505 desc.params.sw_rules.num_rules_fltr_entry_index =
1506 CPU_TO_LE16(num_rules);
1507 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1511 * ice_aq_add_recipe - add switch recipe
1512 * @hw: pointer to the HW struct
1513 * @s_recipe_list: pointer to switch rule population list
1514 * @num_recipes: number of switch recipes in the list
1515 * @cd: pointer to command details structure or NULL
1520 ice_aq_add_recipe(struct ice_hw *hw,
1521 struct ice_aqc_recipe_data_elem *s_recipe_list,
1522 u16 num_recipes, struct ice_sq_cd *cd)
1524 struct ice_aqc_add_get_recipe *cmd;
1525 struct ice_aq_desc desc;
1528 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1529 cmd = &desc.params.add_get_recipe;
1530 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1532 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1533 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1535 buf_size = num_recipes * sizeof(*s_recipe_list);
1537 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1541 * ice_aq_get_recipe - get switch recipe
1542 * @hw: pointer to the HW struct
1543 * @s_recipe_list: pointer to switch rule population list
1544 * @num_recipes: pointer to the number of recipes (input and output)
1545 * @recipe_root: root recipe number of recipe(s) to retrieve
1546 * @cd: pointer to command details structure or NULL
1550 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1551 * On output, *num_recipes will equal the number of entries returned in
1554 * The caller must supply enough space in s_recipe_list to hold all possible
1555 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1558 ice_aq_get_recipe(struct ice_hw *hw,
1559 struct ice_aqc_recipe_data_elem *s_recipe_list,
1560 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1562 struct ice_aqc_add_get_recipe *cmd;
1563 struct ice_aq_desc desc;
1564 enum ice_status status;
1567 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1568 return ICE_ERR_PARAM;
1570 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1571 cmd = &desc.params.add_get_recipe;
1572 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1574 cmd->return_index = CPU_TO_LE16(recipe_root);
1575 cmd->num_sub_recipes = 0;
1577 buf_size = *num_recipes * sizeof(*s_recipe_list);
1579 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1580 /* cppcheck-suppress constArgument */
1581 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1587 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1588 * @hw: pointer to the HW struct
1589 * @profile_id: package profile ID to associate the recipe with
1590 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1591 * @cd: pointer to command details structure or NULL
1592 * Recipe to profile association (0x0291)
1595 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1596 struct ice_sq_cd *cd)
1598 struct ice_aqc_recipe_to_profile *cmd;
1599 struct ice_aq_desc desc;
1601 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1602 cmd = &desc.params.recipe_to_profile;
1603 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1604 cmd->profile_id = CPU_TO_LE16(profile_id);
1605 /* Set the recipe ID bit in the bitmask to let the device know which
1606 * profile we are associating the recipe to
1608 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1609 ICE_NONDMA_TO_NONDMA);
1611 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1615 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1616 * @hw: pointer to the HW struct
1617 * @profile_id: package profile ID to associate the recipe with
1618 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1619 * @cd: pointer to command details structure or NULL
1620 * Associate profile ID with given recipe (0x0293)
1623 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1624 struct ice_sq_cd *cd)
1626 struct ice_aqc_recipe_to_profile *cmd;
1627 struct ice_aq_desc desc;
1628 enum ice_status status;
1630 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1631 cmd = &desc.params.recipe_to_profile;
1632 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1633 cmd->profile_id = CPU_TO_LE16(profile_id);
1635 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1637 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1638 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1644 * ice_alloc_recipe - add recipe resource
1645 * @hw: pointer to the hardware structure
1646 * @rid: recipe ID returned as response to AQ call
1648 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1650 struct ice_aqc_alloc_free_res_elem *sw_buf;
1651 enum ice_status status;
1654 buf_len = sizeof(*sw_buf);
1655 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1657 return ICE_ERR_NO_MEMORY;
1659 sw_buf->num_elems = CPU_TO_LE16(1);
1660 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1661 ICE_AQC_RES_TYPE_S) |
1662 ICE_AQC_RES_TYPE_FLAG_SHARED);
1663 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1664 ice_aqc_opc_alloc_res, NULL);
1666 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1667 ice_free(hw, sw_buf);
1672 /* ice_init_port_info - Initialize port_info with switch configuration data
1673 * @pi: pointer to port_info
1674 * @vsi_port_num: VSI number or port number
1675 * @type: Type of switch element (port or VSI)
1676 * @swid: switch ID of the switch the element is attached to
1677 * @pf_vf_num: PF or VF number
1678 * @is_vf: true if the element is a VF, false otherwise
1681 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1682 u16 swid, u16 pf_vf_num, bool is_vf)
1685 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1686 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1688 pi->pf_vf_num = pf_vf_num;
1690 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1691 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1694 ice_debug(pi->hw, ICE_DBG_SW,
1695 "incorrect VSI/port type received\n");
1700 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1701 * @hw: pointer to the hardware structure
1703 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1705 struct ice_aqc_get_sw_cfg_resp *rbuf;
1706 enum ice_status status;
1707 u16 num_total_ports;
1713 num_total_ports = 1;
1715 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1716 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1719 return ICE_ERR_NO_MEMORY;
1721 /* Multiple calls to ice_aq_get_sw_cfg may be required
1722 * to get all the switch configuration information. The need
1723 * for additional calls is indicated by ice_aq_get_sw_cfg
1724 * writing a non-zero value in req_desc
1727 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1728 &req_desc, &num_elems, NULL);
1733 for (i = 0; i < num_elems; i++) {
1734 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1735 u16 pf_vf_num, swid, vsi_port_num;
1739 ele = rbuf[i].elements;
1740 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1741 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1743 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1744 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1746 swid = LE16_TO_CPU(ele->swid);
1748 if (LE16_TO_CPU(ele->pf_vf_num) &
1749 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1752 type = LE16_TO_CPU(ele->vsi_port_num) >>
1753 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1756 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1757 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1758 if (j == num_total_ports) {
1759 ice_debug(hw, ICE_DBG_SW,
1760 "more ports than expected\n");
1761 status = ICE_ERR_CFG;
1764 ice_init_port_info(hw->port_info,
1765 vsi_port_num, type, swid,
1773 } while (req_desc && !status);
1776 ice_free(hw, (void *)rbuf);
1781 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1782 * @hw: pointer to the hardware structure
1783 * @fi: filter info structure to fill/update
1785 * This helper function populates the lb_en and lan_en elements of the provided
1786 * ice_fltr_info struct using the switch's type and characteristics of the
1787 * switch rule being configured.
1789 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1793 if ((fi->flag & ICE_FLTR_TX) &&
1794 (fi->fltr_act == ICE_FWD_TO_VSI ||
1795 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1796 fi->fltr_act == ICE_FWD_TO_Q ||
1797 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1798 /* Setting LB for prune actions will result in replicated
1799 * packets to the internal switch that will be dropped.
1801 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1804 /* Set lan_en to TRUE if
1805 * 1. The switch is a VEB AND
1807 * 2.1 The lookup is a directional lookup like ethertype,
1808 * promiscuous, ethertype-MAC, promiscuous-VLAN
1809 * and default-port OR
1810 * 2.2 The lookup is VLAN, OR
1811 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1812 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1816 * The switch is a VEPA.
1818 * In all other cases, the LAN enable has to be set to false.
1821 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1822 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1823 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1824 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1825 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1826 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1827 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1828 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1829 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1830 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1839 * ice_fill_sw_rule - Helper function to fill switch rule structure
1840 * @hw: pointer to the hardware structure
1841 * @f_info: entry containing packet forwarding information
1842 * @s_rule: switch rule structure to be filled in based on mac_entry
1843 * @opc: switch rules population command type - pass in the command opcode
1846 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1847 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1849 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1857 if (opc == ice_aqc_opc_remove_sw_rules) {
1858 s_rule->pdata.lkup_tx_rx.act = 0;
1859 s_rule->pdata.lkup_tx_rx.index =
1860 CPU_TO_LE16(f_info->fltr_rule_id);
1861 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1865 eth_hdr_sz = sizeof(dummy_eth_header);
1866 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1868 /* initialize the ether header with a dummy header */
1869 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1870 ice_fill_sw_info(hw, f_info);
1872 switch (f_info->fltr_act) {
1873 case ICE_FWD_TO_VSI:
1874 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1875 ICE_SINGLE_ACT_VSI_ID_M;
1876 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1877 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1878 ICE_SINGLE_ACT_VALID_BIT;
1880 case ICE_FWD_TO_VSI_LIST:
1881 act |= ICE_SINGLE_ACT_VSI_LIST;
1882 act |= (f_info->fwd_id.vsi_list_id <<
1883 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1884 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1885 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1886 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1887 ICE_SINGLE_ACT_VALID_BIT;
1890 act |= ICE_SINGLE_ACT_TO_Q;
1891 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1892 ICE_SINGLE_ACT_Q_INDEX_M;
1894 case ICE_DROP_PACKET:
1895 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1896 ICE_SINGLE_ACT_VALID_BIT;
1898 case ICE_FWD_TO_QGRP:
1899 q_rgn = f_info->qgrp_size > 0 ?
1900 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1901 act |= ICE_SINGLE_ACT_TO_Q;
1902 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1903 ICE_SINGLE_ACT_Q_INDEX_M;
1904 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1905 ICE_SINGLE_ACT_Q_REGION_M;
1912 act |= ICE_SINGLE_ACT_LB_ENABLE;
1914 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1916 switch (f_info->lkup_type) {
1917 case ICE_SW_LKUP_MAC:
1918 daddr = f_info->l_data.mac.mac_addr;
1920 case ICE_SW_LKUP_VLAN:
1921 vlan_id = f_info->l_data.vlan.vlan_id;
1922 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1923 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1924 act |= ICE_SINGLE_ACT_PRUNE;
1925 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1928 case ICE_SW_LKUP_ETHERTYPE_MAC:
1929 daddr = f_info->l_data.ethertype_mac.mac_addr;
1931 case ICE_SW_LKUP_ETHERTYPE:
1932 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1933 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1935 case ICE_SW_LKUP_MAC_VLAN:
1936 daddr = f_info->l_data.mac_vlan.mac_addr;
1937 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1939 case ICE_SW_LKUP_PROMISC_VLAN:
1940 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1942 case ICE_SW_LKUP_PROMISC:
1943 daddr = f_info->l_data.mac_vlan.mac_addr;
1949 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1950 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1951 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1953 /* Recipe set depending on lookup type */
1954 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1955 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1956 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1959 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1960 ICE_NONDMA_TO_NONDMA);
1962 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1963 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1964 *off = CPU_TO_BE16(vlan_id);
1967 /* Create the switch rule with the final dummy Ethernet header */
1968 if (opc != ice_aqc_opc_update_sw_rules)
1969 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1973 * ice_add_marker_act
1974 * @hw: pointer to the hardware structure
1975 * @m_ent: the management entry for which sw marker needs to be added
1976 * @sw_marker: sw marker to tag the Rx descriptor with
1977 * @l_id: large action resource ID
1979 * Create a large action to hold software marker and update the switch rule
1980 * entry pointed by m_ent with newly created large action
1982 static enum ice_status
1983 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1984 u16 sw_marker, u16 l_id)
1986 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1987 /* For software marker we need 3 large actions
1988 * 1. FWD action: FWD TO VSI or VSI LIST
1989 * 2. GENERIC VALUE action to hold the profile ID
1990 * 3. GENERIC VALUE action to hold the software marker ID
1992 const u16 num_lg_acts = 3;
1993 enum ice_status status;
1999 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2000 return ICE_ERR_PARAM;
2002 /* Create two back-to-back switch rules and submit them to the HW using
2003 * one memory buffer:
2007 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2008 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2009 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2011 return ICE_ERR_NO_MEMORY;
2013 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2015 /* Fill in the first switch rule i.e. large action */
2016 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2017 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2018 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2020 /* First action VSI forwarding or VSI list forwarding depending on how
2023 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2024 m_ent->fltr_info.fwd_id.hw_vsi_id;
2026 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2027 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2028 ICE_LG_ACT_VSI_LIST_ID_M;
2029 if (m_ent->vsi_count > 1)
2030 act |= ICE_LG_ACT_VSI_LIST;
2031 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2033 /* Second action descriptor type */
2034 act = ICE_LG_ACT_GENERIC;
2036 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2037 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2039 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2040 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2042 /* Third action Marker value */
2043 act |= ICE_LG_ACT_GENERIC;
2044 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2045 ICE_LG_ACT_GENERIC_VALUE_M;
2047 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2049 /* call the fill switch rule to fill the lookup Tx Rx structure */
2050 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2051 ice_aqc_opc_update_sw_rules);
2053 /* Update the action to point to the large action ID */
2054 rx_tx->pdata.lkup_tx_rx.act =
2055 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2056 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2057 ICE_SINGLE_ACT_PTR_VAL_M));
2059 /* Use the filter rule ID of the previously created rule with single
2060 * act. Once the update happens, hardware will treat this as large
2063 rx_tx->pdata.lkup_tx_rx.index =
2064 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2066 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2067 ice_aqc_opc_update_sw_rules, NULL);
2069 m_ent->lg_act_idx = l_id;
2070 m_ent->sw_marker_id = sw_marker;
2073 ice_free(hw, lg_act);
2078 * ice_add_counter_act - add/update filter rule with counter action
2079 * @hw: pointer to the hardware structure
2080 * @m_ent: the management entry for which counter needs to be added
2081 * @counter_id: VLAN counter ID returned as part of allocate resource
2082 * @l_id: large action resource ID
2084 static enum ice_status
2085 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2086 u16 counter_id, u16 l_id)
2088 struct ice_aqc_sw_rules_elem *lg_act;
2089 struct ice_aqc_sw_rules_elem *rx_tx;
2090 enum ice_status status;
2091 /* 2 actions will be added while adding a large action counter */
2092 const int num_acts = 2;
2099 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2100 return ICE_ERR_PARAM;
2102 /* Create two back-to-back switch rules and submit them to the HW using
2103 * one memory buffer:
2107 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2108 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2109 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2112 return ICE_ERR_NO_MEMORY;
2114 rx_tx = (struct ice_aqc_sw_rules_elem *)
2115 ((u8 *)lg_act + lg_act_size);
2117 /* Fill in the first switch rule i.e. large action */
2118 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2119 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2120 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2122 /* First action VSI forwarding or VSI list forwarding depending on how
2125 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2126 m_ent->fltr_info.fwd_id.hw_vsi_id;
2128 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2129 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2130 ICE_LG_ACT_VSI_LIST_ID_M;
2131 if (m_ent->vsi_count > 1)
2132 act |= ICE_LG_ACT_VSI_LIST;
2133 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2135 /* Second action counter ID */
2136 act = ICE_LG_ACT_STAT_COUNT;
2137 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2138 ICE_LG_ACT_STAT_COUNT_M;
2139 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2141 /* call the fill switch rule to fill the lookup Tx Rx structure */
2142 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2143 ice_aqc_opc_update_sw_rules);
2145 act = ICE_SINGLE_ACT_PTR;
2146 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2147 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2149 /* Use the filter rule ID of the previously created rule with single
2150 * act. Once the update happens, hardware will treat this as large
2153 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2154 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2156 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2157 ice_aqc_opc_update_sw_rules, NULL);
2159 m_ent->lg_act_idx = l_id;
2160 m_ent->counter_index = counter_id;
2163 ice_free(hw, lg_act);
2168 * ice_create_vsi_list_map
2169 * @hw: pointer to the hardware structure
2170 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2171 * @num_vsi: number of VSI handles in the array
2172 * @vsi_list_id: VSI list ID generated as part of allocate resource
2174 * Helper function to create a new entry of VSI list ID to VSI mapping
2175 * using the given VSI list ID
2177 static struct ice_vsi_list_map_info *
2178 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2181 struct ice_switch_info *sw = hw->switch_info;
2182 struct ice_vsi_list_map_info *v_map;
2185 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2190 v_map->vsi_list_id = vsi_list_id;
2192 for (i = 0; i < num_vsi; i++)
2193 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2195 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2200 * ice_update_vsi_list_rule
2201 * @hw: pointer to the hardware structure
2202 * @vsi_handle_arr: array of VSI handles to form a VSI list
2203 * @num_vsi: number of VSI handles in the array
2204 * @vsi_list_id: VSI list ID generated as part of allocate resource
2205 * @remove: Boolean value to indicate if this is a remove action
2206 * @opc: switch rules population command type - pass in the command opcode
2207 * @lkup_type: lookup type of the filter
2209 * Call AQ command to add a new switch rule or update existing switch rule
2210 * using the given VSI list ID
2212 static enum ice_status
2213 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2214 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2215 enum ice_sw_lkup_type lkup_type)
2217 struct ice_aqc_sw_rules_elem *s_rule;
2218 enum ice_status status;
2224 return ICE_ERR_PARAM;
2226 if (lkup_type == ICE_SW_LKUP_MAC ||
2227 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2228 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2229 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2230 lkup_type == ICE_SW_LKUP_PROMISC ||
2231 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2232 lkup_type == ICE_SW_LKUP_LAST)
2233 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2234 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2235 else if (lkup_type == ICE_SW_LKUP_VLAN)
2236 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2237 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2239 return ICE_ERR_PARAM;
2241 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2242 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2244 return ICE_ERR_NO_MEMORY;
2245 for (i = 0; i < num_vsi; i++) {
2246 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2247 status = ICE_ERR_PARAM;
2250 /* AQ call requires hw_vsi_id(s) */
2251 s_rule->pdata.vsi_list.vsi[i] =
2252 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2255 s_rule->type = CPU_TO_LE16(type);
2256 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2257 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2259 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2262 ice_free(hw, s_rule);
2267 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2268 * @hw: pointer to the HW struct
2269 * @vsi_handle_arr: array of VSI handles to form a VSI list
2270 * @num_vsi: number of VSI handles in the array
2271 * @vsi_list_id: stores the ID of the VSI list to be created
2272 * @lkup_type: switch rule filter's lookup type
2274 static enum ice_status
2275 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2276 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2278 enum ice_status status;
/* Allocate a VSI list resource from FW; on success *vsi_list_id receives
 * the ID of the newly allocated list.
 */
2280 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2281 ice_aqc_opc_alloc_res);
/* NOTE(review): extraction appears to drop the "if (status) return status;"
 * guard normally following the allocation — confirm against the full file.
 */
2285 /* Update the newly created VSI list to include the specified VSIs */
2286 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2287 *vsi_list_id, false,
2288 ice_aqc_opc_add_sw_rules, lkup_type);
2292 * ice_create_pkt_fwd_rule
2293 * @hw: pointer to the hardware structure
2294 * @f_entry: entry containing packet forwarding information
2296 * Create switch rule with given filter information and add an entry
2297 * to the corresponding filter management list to track this switch rule
2300 static enum ice_status
2301 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2302 struct ice_fltr_list_entry *f_entry)
2304 struct ice_fltr_mgmt_list_entry *fm_entry;
2305 struct ice_aqc_sw_rules_elem *s_rule;
2306 enum ice_sw_lkup_type l_type;
2307 struct ice_sw_recipe *recp;
2308 enum ice_status status;
/* Buffer sized for a lookup rule carrying the dummy Ethernet header */
2310 s_rule = (struct ice_aqc_sw_rules_elem *)
2311 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
/* NOTE(review): the "if (!s_rule)" guard preceding this return appears to
 * have been dropped by the extraction — confirm against the full file.
 */
2313 return ICE_ERR_NO_MEMORY;
2314 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2315 ice_malloc(hw, sizeof(*fm_entry));
2317 status = ICE_ERR_NO_MEMORY;
2318 goto ice_create_pkt_fwd_rule_exit;
2321 fm_entry->fltr_info = f_entry->fltr_info;
2323 /* Initialize all the fields for the management entry */
2324 fm_entry->vsi_count = 1;
2325 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2326 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2327 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
/* Serialize the filter info into the AQ switch-rule element */
2329 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2330 ice_aqc_opc_add_sw_rules);
2332 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2333 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the tracking entry is not kept */
2335 ice_free(hw, fm_entry);
2336 goto ice_create_pkt_fwd_rule_exit;
/* FW returned the rule index; record it in both the caller's entry and
 * the book-keeping entry.
 */
2339 f_entry->fltr_info.fltr_rule_id =
2340 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2341 fm_entry->fltr_info.fltr_rule_id =
2342 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2344 /* The book keeping entries will get removed when base driver
2345 * calls remove filter AQ command
2347 l_type = fm_entry->fltr_info.lkup_type;
2348 recp = &hw->switch_info->recp_list[l_type];
2349 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2351 ice_create_pkt_fwd_rule_exit:
2352 ice_free(hw, s_rule);
2357 * ice_update_pkt_fwd_rule
2358 * @hw: pointer to the hardware structure
2359 * @f_info: filter information for switch rule
2361 * Call AQ command to update a previously created switch rule with a
2364 static enum ice_status
2365 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2367 struct ice_aqc_sw_rules_elem *s_rule;
2368 enum ice_status status;
2370 s_rule = (struct ice_aqc_sw_rules_elem *)
2371 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
/* NOTE(review): "if (!s_rule)" guard before this return appears dropped by
 * the extraction — confirm against the full file.
 */
2373 return ICE_ERR_NO_MEMORY;
2375 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Update targets an existing rule, so the FW rule index must be supplied */
2377 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2379 /* Update switch rule with new rule set to forward VSI list */
2380 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2381 ice_aqc_opc_update_sw_rules, NULL);
2383 ice_free(hw, s_rule);
2388 * ice_update_sw_rule_bridge_mode
2389 * @hw: pointer to the HW struct
2391 * Updates unicast switch filter rules based on VEB/VEPA mode
2393 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2395 struct ice_switch_info *sw = hw->switch_info;
2396 struct ice_fltr_mgmt_list_entry *fm_entry;
2397 enum ice_status status = ICE_SUCCESS;
2398 struct LIST_HEAD_TYPE *rule_head;
2399 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC-lookup rules are affected by bridge-mode changes */
2401 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2402 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2404 ice_acquire_lock(rule_lock);
2405 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2407 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2408 u8 *addr = fi->l_data.mac.mac_addr;
2410 /* Update unicast Tx rules to reflect the selected
2413 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2414 (fi->fltr_act == ICE_FWD_TO_VSI ||
2415 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2416 fi->fltr_act == ICE_FWD_TO_Q ||
2417 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* Re-emit the rule so FW applies the current VEB/VEPA setting */
2418 status = ice_update_pkt_fwd_rule(hw, fi);
2424 ice_release_lock(rule_lock);
2430 * ice_add_update_vsi_list
2431 * @hw: pointer to the hardware structure
2432 * @m_entry: pointer to current filter management list entry
2433 * @cur_fltr: filter information from the book keeping entry
2434 * @new_fltr: filter information with the new VSI to be added
2436 * Call AQ command to add or update previously created VSI list with new VSI.
2438 * Helper function to do book keeping associated with adding filter information
2439 * The algorithm to do the book keeping is described below :
2440 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2441 * if only one VSI has been added till now
2442 * Allocate a new VSI list and add two VSIs
2443 * to this list using switch rule command
2444 * Update the previously created switch rule with the
2445 * newly created VSI list ID
2446 * if a VSI list was previously created
2447 * Add the new VSI to the previously created VSI list set
2448 * using the update switch rule command
2450 static enum ice_status
2451 ice_add_update_vsi_list(struct ice_hw *hw,
2452 struct ice_fltr_mgmt_list_entry *m_entry,
2453 struct ice_fltr_info *cur_fltr,
2454 struct ice_fltr_info *new_fltr)
2456 enum ice_status status = ICE_SUCCESS;
2457 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be aggregated into a VSI list */
2459 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2460 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2461 return ICE_ERR_NOT_IMPL;
2463 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2464 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2465 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2466 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2467 return ICE_ERR_NOT_IMPL;
2469 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2470 /* Only one entry existed in the mapping and it was not already
2471 * a part of a VSI list. So, create a VSI list with the old and
2474 struct ice_fltr_info tmp_fltr;
2475 u16 vsi_handle_arr[2];
2477 /* A rule already exists with the new VSI being added */
2478 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2479 return ICE_ERR_ALREADY_EXISTS;
2481 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2482 vsi_handle_arr[1] = new_fltr->vsi_handle;
2483 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2485 new_fltr->lkup_type);
2489 tmp_fltr = *new_fltr;
2490 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2491 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2492 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2493 /* Update the previous switch rule of "MAC forward to VSI" to
2494 * "MAC fwd to VSI list"
2496 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping entry in sync with the HW rule */
2500 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2501 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2502 m_entry->vsi_list_info =
2503 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2506 /* If this entry was large action then the large action needs
2507 * to be updated to point to FWD to VSI list
2509 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2511 ice_add_marker_act(hw, m_entry,
2512 m_entry->sw_marker_id,
2513 m_entry->lg_act_idx);
/* else-branch: a VSI list already exists for this filter */
2515 u16 vsi_handle = new_fltr->vsi_handle;
2516 enum ice_adminq_opc opcode;
2518 if (!m_entry->vsi_list_info)
2521 /* A rule already exists with the new VSI being added */
2522 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2525 /* Update the previously created VSI list set with
2526 * the new VSI ID passed in
2528 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2529 opcode = ice_aqc_opc_update_sw_rules;
2531 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2532 vsi_list_id, false, opcode,
2533 new_fltr->lkup_type);
2534 /* update VSI list mapping info with new VSI ID */
2536 ice_set_bit(vsi_handle,
2537 m_entry->vsi_list_info->vsi_map);
2540 m_entry->vsi_count++;
2545 * ice_find_rule_entry - Search a rule entry
2546 * @hw: pointer to the hardware structure
2547 * @recp_id: lookup type for which the specified rule needs to be searched
2548 * @f_info: rule information
2550 * Helper function to search for a given rule entry
2551 * Returns pointer to entry storing the rule if found
2553 static struct ice_fltr_mgmt_list_entry *
2554 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2556 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2557 struct ice_switch_info *sw = hw->switch_info;
2558 struct LIST_HEAD_TYPE *list_head;
/* Linear scan of the per-recipe rule list; caller holds the rule lock */
2560 list_head = &sw->recp_list[recp_id].filt_rules;
2561 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match requires identical lookup data AND identical flag bits */
2563 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2564 sizeof(f_info->l_data)) &&
2565 f_info->flag == list_itr->fltr_info.flag) {
2574 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2575 * @hw: pointer to the hardware structure
2576 * @recp_id: lookup type for which VSI lists needs to be searched
2577 * @vsi_handle: VSI handle to be found in VSI list
2578 * @vsi_list_id: VSI list ID found containing vsi_handle
2580 * Helper function to search a VSI list with single entry containing given VSI
2581 * handle element. This can be extended further to search VSI list with more
2582 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2584 static struct ice_vsi_list_map_info *
2585 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2588 struct ice_vsi_list_map_info *map_info = NULL;
2589 struct ice_switch_info *sw = hw->switch_info;
2590 struct LIST_HEAD_TYPE *list_head;
2592 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced recipes track rules via a different entry type */
2593 if (sw->recp_list[recp_id].adv_rule) {
2594 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2596 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2597 ice_adv_fltr_mgmt_list_entry,
2599 if (list_itr->vsi_list_info) {
2600 map_info = list_itr->vsi_list_info;
2601 if (ice_is_bit_set(map_info->vsi_map,
2603 *vsi_list_id = map_info->vsi_list_id;
2609 struct ice_fltr_mgmt_list_entry *list_itr;
2611 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2612 ice_fltr_mgmt_list_entry,
/* Only single-VSI lists are candidates for reuse (see header) */
2614 if (list_itr->vsi_count == 1 &&
2615 list_itr->vsi_list_info) {
2616 map_info = list_itr->vsi_list_info;
2617 if (ice_is_bit_set(map_info->vsi_map,
2619 *vsi_list_id = map_info->vsi_list_id;
2629 * ice_add_rule_internal - add rule for a given lookup type
2630 * @hw: pointer to the hardware structure
2631 * @recp_id: lookup type (recipe ID) for which rule has to be added
2632 * @f_entry: structure containing MAC forwarding information
2634 * Adds or updates the rule lists for a given recipe
2636 static enum ice_status
2637 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2638 struct ice_fltr_list_entry *f_entry)
2640 struct ice_switch_info *sw = hw->switch_info;
2641 struct ice_fltr_info *new_fltr, *cur_fltr;
2642 struct ice_fltr_mgmt_list_entry *m_entry;
2643 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2644 enum ice_status status = ICE_SUCCESS;
2646 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2647 return ICE_ERR_PARAM;
2649 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2650 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2651 f_entry->fltr_info.fwd_id.hw_vsi_id =
2652 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2654 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2656 ice_acquire_lock(rule_lock);
2657 new_fltr = &f_entry->fltr_info;
/* Rx rules use the port as source; Tx rules use the HW VSI number */
2658 if (new_fltr->flag & ICE_FLTR_RX)
2659 new_fltr->src = hw->port_info->lport;
2660 else if (new_fltr->flag & ICE_FLTR_TX)
2662 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* First subscriber creates the rule; later ones join its VSI list */
2664 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2666 status = ice_create_pkt_fwd_rule(hw, f_entry);
2667 goto exit_add_rule_internal;
2670 cur_fltr = &m_entry->fltr_info;
2671 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2673 exit_add_rule_internal:
2674 ice_release_lock(rule_lock);
2679 * ice_remove_vsi_list_rule
2680 * @hw: pointer to the hardware structure
2681 * @vsi_list_id: VSI list ID generated as part of allocate resource
2682 * @lkup_type: switch rule filter lookup type
2684 * The VSI list should be emptied before this function is called to remove the
2687 static enum ice_status
2688 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2689 enum ice_sw_lkup_type lkup_type)
2691 struct ice_aqc_sw_rules_elem *s_rule;
2692 enum ice_status status;
/* Zero-entry list element is enough to clear the list */
2695 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2696 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
/* NOTE(review): "if (!s_rule)" guard before this return appears dropped by
 * the extraction — confirm against the full file.
 */
2698 return ICE_ERR_NO_MEMORY;
2700 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2701 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2703 /* Free the vsi_list resource that we allocated. It is assumed that the
2704 * list is empty at this point.
2706 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2707 ice_aqc_opc_free_res);
2709 ice_free(hw, s_rule);
2714 * ice_rem_update_vsi_list
2715 * @hw: pointer to the hardware structure
2716 * @vsi_handle: VSI handle of the VSI to remove
2717 * @fm_list: filter management entry for which the VSI list management needs to
2720 static enum ice_status
2721 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2722 struct ice_fltr_mgmt_list_entry *fm_list)
2724 enum ice_sw_lkup_type lkup_type;
2725 enum ice_status status = ICE_SUCCESS;
2728 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2729 fm_list->vsi_count == 0)
2730 return ICE_ERR_PARAM;
2732 /* A rule with the VSI being removed does not exist */
2733 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2734 return ICE_ERR_DOES_NOT_EXIST;
2736 lkup_type = fm_list->fltr_info.lkup_type;
2737 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Ask FW to drop this VSI from the list (remove=true) */
2738 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2739 ice_aqc_opc_update_sw_rules,
2744 fm_list->vsi_count--;
2745 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Non-VLAN rules collapse back to a direct FWD_TO_VSI rule when only one
 * VSI remains; VLAN (pruning) rules always keep a VSI list.
 */
2747 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2748 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2749 struct ice_vsi_list_map_info *vsi_list_info =
2750 fm_list->vsi_list_info;
2753 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2755 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2756 return ICE_ERR_OUT_OF_RANGE;
2758 /* Make sure VSI list is empty before removing it below */
2759 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2761 ice_aqc_opc_update_sw_rules,
/* Rewrite the HW rule to forward directly to the surviving VSI */
2766 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2767 tmp_fltr_info.fwd_id.hw_vsi_id =
2768 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2769 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2770 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2772 ice_debug(hw, ICE_DBG_SW,
2773 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2774 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2778 fm_list->fltr_info = tmp_fltr_info;
2781 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2782 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2783 struct ice_vsi_list_map_info *vsi_list_info =
2784 fm_list->vsi_list_info;
2786 /* Remove the VSI list since it is no longer used */
2787 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2789 ice_debug(hw, ICE_DBG_SW,
2790 "Failed to remove VSI list %d, error %d\n",
2791 vsi_list_id, status);
2795 LIST_DEL(&vsi_list_info->list_entry);
2796 ice_free(hw, vsi_list_info);
2797 fm_list->vsi_list_info = NULL;
2804 * ice_remove_rule_internal - Remove a filter rule of a given type
2806 * @hw: pointer to the hardware structure
2807 * @recp_id: recipe ID for which the rule needs to removed
2808 * @f_entry: rule entry containing filter information
2810 static enum ice_status
2811 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2812 struct ice_fltr_list_entry *f_entry)
2814 struct ice_switch_info *sw = hw->switch_info;
2815 struct ice_fltr_mgmt_list_entry *list_elem;
2816 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2817 enum ice_status status = ICE_SUCCESS;
2818 bool remove_rule = false;
2821 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2822 return ICE_ERR_PARAM;
2823 f_entry->fltr_info.fwd_id.hw_vsi_id =
2824 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2826 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2827 ice_acquire_lock(rule_lock);
2828 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2830 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, or only the VSI
 * list membership updated.
 */
2834 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2836 } else if (!list_elem->vsi_list_info) {
2837 status = ICE_ERR_DOES_NOT_EXIST;
2839 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2840 /* a ref_cnt > 1 indicates that the vsi_list is being
2841 * shared by multiple rules. Decrement the ref_cnt and
2842 * remove this rule, but do not modify the list, as it
2843 * is in-use by other rules.
2845 list_elem->vsi_list_info->ref_cnt--;
2848 /* a ref_cnt of 1 indicates the vsi_list is only used
2849 * by one rule. However, the original removal request is only
2850 * for a single VSI. Update the vsi_list first, and only
2851 * remove the rule if there are no further VSIs in this list.
2853 vsi_handle = f_entry->fltr_info.vsi_handle;
2854 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2857 /* if VSI count goes to zero after updating the VSI list */
2858 if (list_elem->vsi_count == 0)
2863 /* Remove the lookup rule */
2864 struct ice_aqc_sw_rules_elem *s_rule;
2866 s_rule = (struct ice_aqc_sw_rules_elem *)
2867 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2869 status = ICE_ERR_NO_MEMORY;
2873 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2874 ice_aqc_opc_remove_sw_rules);
2876 status = ice_aq_sw_rules(hw, s_rule,
2877 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2878 ice_aqc_opc_remove_sw_rules, NULL);
2880 /* Remove a book keeping from the list */
2881 ice_free(hw, s_rule);
2886 LIST_DEL(&list_elem->list_entry);
2887 ice_free(hw, list_elem);
2890 ice_release_lock(rule_lock);
2895 * ice_aq_get_res_alloc - get allocated resources
2896 * @hw: pointer to the HW struct
2897 * @num_entries: pointer to u16 to store the number of resource entries returned
2898 * @buf: pointer to user-supplied buffer
2899 * @buf_size: size of buff
2900 * @cd: pointer to command details structure or NULL
2902 * The user-supplied buffer must be large enough to store the resource
2903 * information for all resource types. Each resource type is an
2904 * ice_aqc_get_res_resp_data_elem structure.
2907 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2908 u16 buf_size, struct ice_sq_cd *cd)
2910 struct ice_aqc_get_res_alloc *resp;
2911 enum ice_status status;
2912 struct ice_aq_desc desc;
/* NOTE(review): the "if (!buf)" guard before this return appears dropped
 * by the extraction — confirm against the full file.
 */
2915 return ICE_ERR_BAD_PTR;
2917 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2918 return ICE_ERR_INVAL_SIZE;
2920 resp = &desc.params.get_res;
2922 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2923 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only written back on AQ success */
2925 if (!status && num_entries)
2926 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2932 * ice_aq_get_res_descs - get allocated resource descriptors
2933 * @hw: pointer to the hardware structure
2934 * @num_entries: number of resource entries in buffer
2935 * @buf: Indirect buffer to hold data parameters and response
2936 * @buf_size: size of buffer for indirect commands
2937 * @res_type: resource type
2938 * @res_shared: is resource shared
2939 * @desc_id: input - first desc ID to start; output - next desc ID
2940 * @cd: pointer to command details structure or NULL
2943 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2944 struct ice_aqc_get_allocd_res_desc_resp *buf,
2945 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2946 struct ice_sq_cd *cd)
2948 struct ice_aqc_get_allocd_res_desc *cmd;
2949 struct ice_aq_desc desc;
2950 enum ice_status status;
2952 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2954 cmd = &desc.params.get_res_desc;
/* NOTE(review): the "if (!buf)" guard before this return appears dropped
 * by the extraction — confirm against the full file.
 */
2957 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries descriptors */
2959 if (buf_size != (num_entries * sizeof(*buf)))
2960 return ICE_ERR_PARAM;
2962 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2964 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2965 ICE_AQC_RES_TYPE_M) | (res_shared ?
2966 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2967 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2969 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the continuation cursor so callers can page through */
2971 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2977 * ice_add_mac - Add a MAC address based filter rule
2978 * @hw: pointer to the hardware structure
2979 * @m_list: list of MAC addresses and forwarding information
2981 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2982 * multiple unicast addresses, the function assumes that all the
2983 * addresses are unique in a given add_mac call. It doesn't
2984 * check for duplicates in this case, removing duplicates from a given
2985 * list should be taken care of in the caller of this function.
2988 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2990 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2991 struct ice_fltr_list_entry *m_list_itr;
2992 struct LIST_HEAD_TYPE *rule_head;
2993 u16 elem_sent, total_elem_left;
2994 struct ice_switch_info *sw;
2995 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2996 enum ice_status status = ICE_SUCCESS;
2997 u16 num_unicast = 0;
3001 return ICE_ERR_PARAM;
3003 sw = hw->switch_info;
3004 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; multicast (and shared-unicast) addresses
 * are added one at a time, exclusive unicast addresses are batched below.
 */
3005 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3007 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3011 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3012 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3013 if (!ice_is_vsi_valid(hw, vsi_handle))
3014 return ICE_ERR_PARAM;
3015 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3016 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3017 /* update the src in case it is VSI num */
3018 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3019 return ICE_ERR_PARAM;
3020 m_list_itr->fltr_info.src = hw_vsi_id;
3021 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3022 IS_ZERO_ETHER_ADDR(add))
3023 return ICE_ERR_PARAM;
3024 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3025 /* Don't overwrite the unicast address */
3026 ice_acquire_lock(rule_lock);
3027 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3028 &m_list_itr->fltr_info)) {
3029 ice_release_lock(rule_lock);
3030 return ICE_ERR_ALREADY_EXISTS;
3032 ice_release_lock(rule_lock);
3034 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3035 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3036 m_list_itr->status =
3037 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3039 if (m_list_itr->status)
3040 return m_list_itr->status;
3044 ice_acquire_lock(rule_lock);
3045 /* Exit if no suitable entries were found for adding bulk switch rule */
3047 status = ICE_SUCCESS;
3048 goto ice_add_mac_exit;
3051 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3053 /* Allocate switch rule buffer for the bulk update for unicast */
3054 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3055 s_rule = (struct ice_aqc_sw_rules_elem *)
3056 ice_calloc(hw, num_unicast, s_rule_size);
3058 status = ICE_ERR_NO_MEMORY;
3059 goto ice_add_mac_exit;
/* Pass 2: serialize each exclusive-unicast filter into the bulk buffer */
3063 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3065 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3066 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3068 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3069 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3070 ice_aqc_opc_add_sw_rules);
3071 r_iter = (struct ice_aqc_sw_rules_elem *)
3072 ((u8 *)r_iter + s_rule_size);
3076 /* Call AQ bulk switch rule update for all unicast addresses */
3078 /* Call AQ switch rule in AQ_MAX chunk */
3079 for (total_elem_left = num_unicast; total_elem_left > 0;
3080 total_elem_left -= elem_sent) {
3081 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size limited by the max AQ buffer length */
3083 elem_sent = min(total_elem_left,
3084 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3085 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3086 elem_sent, ice_aqc_opc_add_sw_rules,
3089 goto ice_add_mac_exit;
3090 r_iter = (struct ice_aqc_sw_rules_elem *)
3091 ((u8 *)r_iter + (elem_sent * s_rule_size));
3094 /* Fill up rule ID based on the value returned from FW */
3096 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3098 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3099 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3100 struct ice_fltr_mgmt_list_entry *fm_entry;
3102 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3103 f_info->fltr_rule_id =
3104 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3105 f_info->fltr_act = ICE_FWD_TO_VSI;
3106 /* Create an entry to track this MAC address */
3107 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3108 ice_malloc(hw, sizeof(*fm_entry));
3110 status = ICE_ERR_NO_MEMORY;
3111 goto ice_add_mac_exit;
3113 fm_entry->fltr_info = *f_info;
3114 fm_entry->vsi_count = 1;
3115 /* The book keeping entries will get removed when
3116 * base driver calls remove filter AQ command
3119 LIST_ADD(&fm_entry->list_entry, rule_head);
3120 r_iter = (struct ice_aqc_sw_rules_elem *)
3121 ((u8 *)r_iter + s_rule_size);
3126 ice_release_lock(rule_lock);
3128 ice_free(hw, s_rule);
3133 * ice_add_vlan_internal - Add one VLAN based filter rule
3134 * @hw: pointer to the hardware structure
3135 * @f_entry: filter entry containing one VLAN information
3137 static enum ice_status
3138 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3140 struct ice_switch_info *sw = hw->switch_info;
3141 struct ice_fltr_mgmt_list_entry *v_list_itr;
3142 struct ice_fltr_info *new_fltr, *cur_fltr;
3143 enum ice_sw_lkup_type lkup_type;
3144 u16 vsi_list_id = 0, vsi_handle;
3145 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3146 enum ice_status status = ICE_SUCCESS;
3148 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3149 return ICE_ERR_PARAM;
3151 f_entry->fltr_info.fwd_id.hw_vsi_id =
3152 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3153 new_fltr = &f_entry->fltr_info;
3155 /* VLAN ID should only be 12 bits */
3156 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3157 return ICE_ERR_PARAM;
3159 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3160 return ICE_ERR_PARAM;
3162 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3163 lkup_type = new_fltr->lkup_type;
3164 vsi_handle = new_fltr->vsi_handle;
3165 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3166 ice_acquire_lock(rule_lock);
/* Case 1 (no existing rule for this VLAN): build a VSI list (new or
 * reused) and create the forwarding rule.
 */
3167 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3169 struct ice_vsi_list_map_info *map_info = NULL;
3171 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3172 /* All VLAN pruning rules use a VSI list. Check if
3173 * there is already a VSI list containing VSI that we
3174 * want to add. If found, use the same vsi_list_id for
3175 * this new VLAN rule or else create a new list.
3177 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3181 status = ice_create_vsi_list_rule(hw,
3189 /* Convert the action to forwarding to a VSI list. */
3190 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3191 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3194 status = ice_create_pkt_fwd_rule(hw, f_entry);
3196 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3199 status = ICE_ERR_DOES_NOT_EXIST;
3202 /* reuse VSI list for new rule and increment ref_cnt */
3204 v_list_itr->vsi_list_info = map_info;
3205 map_info->ref_cnt++;
3207 v_list_itr->vsi_list_info =
3208 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and owns its VSI list exclusively — extend it */
3212 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3213 /* Update existing VSI list to add new VSI ID only if it used
3216 cur_fltr = &v_list_itr->fltr_info;
3217 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3220 /* If VLAN rule exists and VSI list being used by this rule is
3221 * referenced by more than 1 VLAN rule. Then create a new VSI
3222 * list appending previous VSI with new VSI and update existing
3223 * VLAN rule to point to new VSI list ID
3225 struct ice_fltr_info tmp_fltr;
3226 u16 vsi_handle_arr[2];
3229 /* Current implementation only supports reusing VSI list with
3230 * one VSI count. We should never hit below condition
3232 if (v_list_itr->vsi_count > 1 &&
3233 v_list_itr->vsi_list_info->ref_cnt > 1) {
3234 ice_debug(hw, ICE_DBG_SW,
3235 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3236 status = ICE_ERR_CFG;
3241 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3244 /* A rule already exists with the new VSI being added */
3245 if (cur_handle == vsi_handle) {
3246 status = ICE_ERR_ALREADY_EXISTS;
3250 vsi_handle_arr[0] = cur_handle;
3251 vsi_handle_arr[1] = vsi_handle;
3252 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3253 &vsi_list_id, lkup_type);
3257 tmp_fltr = v_list_itr->fltr_info;
3258 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3259 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3260 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3261 /* Update the previous switch rule to a new VSI list which
3262 * includes current VSI that is requested
3264 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3268 /* before overriding VSI list map info. decrement ref_cnt of
3271 v_list_itr->vsi_list_info->ref_cnt--;
3273 /* now update to newly created list */
3274 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3275 v_list_itr->vsi_list_info =
3276 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3278 v_list_itr->vsi_count++;
3282 ice_release_lock(rule_lock);
3287 * ice_add_vlan - Add VLAN based filter rule
3288 * @hw: pointer to the hardware structure
3289 * @v_list: list of VLAN entries and forwarding information
3292 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3294 struct ice_fltr_list_entry *v_list_itr;
/* NOTE(review): the "if (!v_list || !hw)" guard before this return appears
 * dropped by the extraction — confirm against the full file.
 */
3297 return ICE_ERR_PARAM;
/* Add entries one at a time; stop at the first failure and report it */
3299 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3301 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3302 return ICE_ERR_PARAM;
3303 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3304 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3305 if (v_list_itr->status)
3306 return v_list_itr->status;
3312 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3313 * @hw: pointer to the hardware structure
3314 * @mv_list: list of MAC and VLAN filters
3316 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3317 * pruning bits enabled, then it is the responsibility of the caller to make
3318 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3319 * VLAN won't be received on that VSI otherwise.
3322 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3324 struct ice_fltr_list_entry *mv_list_itr;
3326 if (!mv_list || !hw)
3327 return ICE_ERR_PARAM;
/* Each entry is added via the generic per-recipe add path */
3329 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3331 enum ice_sw_lkup_type l_type =
3332 mv_list_itr->fltr_info.lkup_type;
3334 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3335 return ICE_ERR_PARAM;
3336 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3337 mv_list_itr->status =
3338 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3340 if (mv_list_itr->status)
3341 return mv_list_itr->status;
3347 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3348 * @hw: pointer to the hardware structure
3349 * @em_list: list of ether type MAC filter, MAC is optional
3351 * This function requires the caller to populate the entries in
3352 * the filter list with the necessary fields (including flags to
3353 * indicate Tx or Rx rules).
3356 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3358 struct ice_fltr_list_entry *em_list_itr;
3360 if (!em_list || !hw)
3361 return ICE_ERR_PARAM;
3363 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3365 enum ice_sw_lkup_type l_type =
3366 em_list_itr->fltr_info.lkup_type;
/* Accept both ethertype-only and ethertype+MAC lookups */
3368 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3369 l_type != ICE_SW_LKUP_ETHERTYPE)
3370 return ICE_ERR_PARAM;
3372 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3374 if (em_list_itr->status)
3375 return em_list_itr->status;
3381 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3382 * @hw: pointer to the hardware structure
3383 * @em_list: list of ethertype or ethertype MAC entries
3386 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3388 struct ice_fltr_list_entry *em_list_itr, *tmp;
3390 if (!em_list || !hw)
3391 return ICE_ERR_PARAM;
/* _SAFE iteration: removal may unlink entries while walking */
3393 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3395 enum ice_sw_lkup_type l_type =
3396 em_list_itr->fltr_info.lkup_type;
3398 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3399 l_type != ICE_SW_LKUP_ETHERTYPE)
3400 return ICE_ERR_PARAM;
3402 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3404 if (em_list_itr->status)
3405 return em_list_itr->status;
3411 * ice_rem_sw_rule_info
3412 * @hw: pointer to the hardware structure
3413 * @rule_head: pointer to the switch list structure that we want to delete
3416 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3418 if (!LIST_EMPTY(rule_head)) {
3419 struct ice_fltr_mgmt_list_entry *entry;
3420 struct ice_fltr_mgmt_list_entry *tmp;
3422 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3423 ice_fltr_mgmt_list_entry, list_entry) {
3424 LIST_DEL(&entry->list_entry);
3425 ice_free(hw, entry);
3431 * ice_rem_adv_rule_info
3432 * @hw: pointer to the hardware structure
3433 * @rule_head: pointer to the switch list structure that we want to delete
3436 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3438 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3439 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3441 if (LIST_EMPTY(rule_head))
3444 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3445 ice_adv_fltr_mgmt_list_entry, list_entry) {
3446 LIST_DEL(&lst_itr->list_entry);
3447 ice_free(hw, lst_itr->lkups);
3448 ice_free(hw, lst_itr);
3453 * ice_rem_all_sw_rules_info
3454 * @hw: pointer to the hardware structure
3456 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3458 struct ice_switch_info *sw = hw->switch_info;
3461 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3462 struct LIST_HEAD_TYPE *rule_head;
3464 rule_head = &sw->recp_list[i].filt_rules;
3465 if (!sw->recp_list[i].adv_rule)
3466 ice_rem_sw_rule_info(hw, rule_head);
3468 ice_rem_adv_rule_info(hw, rule_head);
3473 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3474 * @pi: pointer to the port_info structure
3475 * @vsi_handle: VSI handle to set as default
3476 * @set: true to add the above mentioned switch rule, false to remove it
3477 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3479 * add filter rule to set/unset given VSI as default VSI for the switch
3480 * (represented by swid)
3483 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3486 struct ice_aqc_sw_rules_elem *s_rule;
3487 struct ice_fltr_info f_info;
3488 struct ice_hw *hw = pi->hw;
3489 enum ice_adminq_opc opcode;
3490 enum ice_status status;
3494 if (!ice_is_vsi_valid(hw, vsi_handle))
3495 return ICE_ERR_PARAM;
3496 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3498 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3499 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3500 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3502 return ICE_ERR_NO_MEMORY;
3504 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3506 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3507 f_info.flag = direction;
3508 f_info.fltr_act = ICE_FWD_TO_VSI;
3509 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3511 if (f_info.flag & ICE_FLTR_RX) {
3512 f_info.src = pi->lport;
3513 f_info.src_id = ICE_SRC_ID_LPORT;
3515 f_info.fltr_rule_id =
3516 pi->dflt_rx_vsi_rule_id;
3517 } else if (f_info.flag & ICE_FLTR_TX) {
3518 f_info.src_id = ICE_SRC_ID_VSI;
3519 f_info.src = hw_vsi_id;
3521 f_info.fltr_rule_id =
3522 pi->dflt_tx_vsi_rule_id;
3526 opcode = ice_aqc_opc_add_sw_rules;
3528 opcode = ice_aqc_opc_remove_sw_rules;
3530 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3532 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3533 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3536 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3538 if (f_info.flag & ICE_FLTR_TX) {
3539 pi->dflt_tx_vsi_num = hw_vsi_id;
3540 pi->dflt_tx_vsi_rule_id = index;
3541 } else if (f_info.flag & ICE_FLTR_RX) {
3542 pi->dflt_rx_vsi_num = hw_vsi_id;
3543 pi->dflt_rx_vsi_rule_id = index;
3546 if (f_info.flag & ICE_FLTR_TX) {
3547 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3548 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3549 } else if (f_info.flag & ICE_FLTR_RX) {
3550 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3551 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3556 ice_free(hw, s_rule);
3561 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3562 * @hw: pointer to the hardware structure
3563 * @recp_id: lookup type for which the specified rule needs to be searched
3564 * @f_info: rule information
3566 * Helper function to search for a unicast rule entry - this is to be used
3567 * to remove unicast MAC filter that is not shared with other VSIs on the
3570 * Returns pointer to entry storing the rule if found
3572 static struct ice_fltr_mgmt_list_entry *
3573 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3574 struct ice_fltr_info *f_info)
3576 struct ice_switch_info *sw = hw->switch_info;
3577 struct ice_fltr_mgmt_list_entry *list_itr;
3578 struct LIST_HEAD_TYPE *list_head;
3580 list_head = &sw->recp_list[recp_id].filt_rules;
3581 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3583 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3584 sizeof(f_info->l_data)) &&
3585 f_info->fwd_id.hw_vsi_id ==
3586 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3587 f_info->flag == list_itr->fltr_info.flag)
3594 * ice_remove_mac - remove a MAC address based filter rule
3595 * @hw: pointer to the hardware structure
3596 * @m_list: list of MAC addresses and forwarding information
3598 * This function removes either a MAC filter rule or a specific VSI from a
3599 * VSI list for a multicast MAC address.
3601 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3602 * ice_add_mac. Caller should be aware that this call will only work if all
3603 * the entries passed into m_list were added previously. It will not attempt to
3604 * do a partial remove of entries that were found.
3607 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3609 struct ice_fltr_list_entry *list_itr, *tmp;
3610 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3613 return ICE_ERR_PARAM;
3615 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3616 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3618 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3619 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3622 if (l_type != ICE_SW_LKUP_MAC)
3623 return ICE_ERR_PARAM;
3625 vsi_handle = list_itr->fltr_info.vsi_handle;
3626 if (!ice_is_vsi_valid(hw, vsi_handle))
3627 return ICE_ERR_PARAM;
3629 list_itr->fltr_info.fwd_id.hw_vsi_id =
3630 ice_get_hw_vsi_num(hw, vsi_handle);
3631 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3632 /* Don't remove the unicast address that belongs to
3633 * another VSI on the switch, since it is not being
3636 ice_acquire_lock(rule_lock);
3637 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3638 &list_itr->fltr_info)) {
3639 ice_release_lock(rule_lock);
3640 return ICE_ERR_DOES_NOT_EXIST;
3642 ice_release_lock(rule_lock);
3644 list_itr->status = ice_remove_rule_internal(hw,
3647 if (list_itr->status)
3648 return list_itr->status;
3654 * ice_remove_vlan - Remove VLAN based filter rule
3655 * @hw: pointer to the hardware structure
3656 * @v_list: list of VLAN entries and forwarding information
3659 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3661 struct ice_fltr_list_entry *v_list_itr, *tmp;
3664 return ICE_ERR_PARAM;
3666 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3668 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3670 if (l_type != ICE_SW_LKUP_VLAN)
3671 return ICE_ERR_PARAM;
3672 v_list_itr->status = ice_remove_rule_internal(hw,
3675 if (v_list_itr->status)
3676 return v_list_itr->status;
3682 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3683 * @hw: pointer to the hardware structure
3684 * @v_list: list of MAC VLAN entries and forwarding information
3687 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3689 struct ice_fltr_list_entry *v_list_itr, *tmp;
3692 return ICE_ERR_PARAM;
3694 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3696 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3698 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3699 return ICE_ERR_PARAM;
3700 v_list_itr->status =
3701 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3703 if (v_list_itr->status)
3704 return v_list_itr->status;
3710 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3711 * @fm_entry: filter entry to inspect
3712 * @vsi_handle: VSI handle to compare with filter info
3715 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3717 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3718 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3719 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3720 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3725 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3726 * @hw: pointer to the hardware structure
3727 * @vsi_handle: VSI handle to remove filters from
3728 * @vsi_list_head: pointer to the list to add entry to
3729 * @fi: pointer to fltr_info of filter entry to copy & add
3731 * Helper function, used when creating a list of filters to remove from
3732 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3733 * original filter entry, with the exception of fltr_info.fltr_act and
3734 * fltr_info.fwd_id fields. These are set such that later logic can
3735 * extract which VSI to remove the fltr from, and pass on that information.
3737 static enum ice_status
3738 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3739 struct LIST_HEAD_TYPE *vsi_list_head,
3740 struct ice_fltr_info *fi)
3742 struct ice_fltr_list_entry *tmp;
3744 /* this memory is freed up in the caller function
3745 * once filters for this VSI are removed
3747 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3749 return ICE_ERR_NO_MEMORY;
3751 tmp->fltr_info = *fi;
3753 /* Overwrite these fields to indicate which VSI to remove filter from,
3754 * so find and remove logic can extract the information from the
3755 * list entries. Note that original entries will still have proper
3758 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3759 tmp->fltr_info.vsi_handle = vsi_handle;
3760 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3762 LIST_ADD(&tmp->list_entry, vsi_list_head);
3768 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3769 * @hw: pointer to the hardware structure
3770 * @vsi_handle: VSI handle to remove filters from
3771 * @lkup_list_head: pointer to the list that has certain lookup type filters
3772 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3774 * Locates all filters in lkup_list_head that are used by the given VSI,
3775 * and adds COPIES of those entries to vsi_list_head (intended to be used
3776 * to remove the listed filters).
3777 * Note that this means all entries in vsi_list_head must be explicitly
3778 * deallocated by the caller when done with list.
3780 static enum ice_status
3781 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3782 struct LIST_HEAD_TYPE *lkup_list_head,
3783 struct LIST_HEAD_TYPE *vsi_list_head)
3785 struct ice_fltr_mgmt_list_entry *fm_entry;
3786 enum ice_status status = ICE_SUCCESS;
3788 /* check to make sure VSI ID is valid and within boundary */
3789 if (!ice_is_vsi_valid(hw, vsi_handle))
3790 return ICE_ERR_PARAM;
3792 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3793 ice_fltr_mgmt_list_entry, list_entry) {
3794 struct ice_fltr_info *fi;
3796 fi = &fm_entry->fltr_info;
3797 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3800 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3809 * ice_determine_promisc_mask
3810 * @fi: filter info to parse
3812 * Helper function to determine which ICE_PROMISC_ mask corresponds
3813 * to given filter into.
3815 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3817 u16 vid = fi->l_data.mac_vlan.vlan_id;
3818 u8 *macaddr = fi->l_data.mac.mac_addr;
3819 bool is_tx_fltr = false;
3820 u8 promisc_mask = 0;
3822 if (fi->flag == ICE_FLTR_TX)
3825 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3826 promisc_mask |= is_tx_fltr ?
3827 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3828 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3829 promisc_mask |= is_tx_fltr ?
3830 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3831 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3832 promisc_mask |= is_tx_fltr ?
3833 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3835 promisc_mask |= is_tx_fltr ?
3836 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3838 return promisc_mask;
3842 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3843 * @hw: pointer to the hardware structure
3844 * @vsi_handle: VSI handle to retrieve info from
3845 * @promisc_mask: pointer to mask to be filled in
3846 * @vid: VLAN ID of promisc VLAN VSI
3849 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3852 struct ice_switch_info *sw = hw->switch_info;
3853 struct ice_fltr_mgmt_list_entry *itr;
3854 struct LIST_HEAD_TYPE *rule_head;
3855 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3857 if (!ice_is_vsi_valid(hw, vsi_handle))
3858 return ICE_ERR_PARAM;
3862 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3863 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3865 ice_acquire_lock(rule_lock);
3866 LIST_FOR_EACH_ENTRY(itr, rule_head,
3867 ice_fltr_mgmt_list_entry, list_entry) {
3868 /* Continue if this filter doesn't apply to this VSI or the
3869 * VSI ID is not in the VSI map for this filter
3871 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3874 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3876 ice_release_lock(rule_lock);
3882 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3883 * @hw: pointer to the hardware structure
3884 * @vsi_handle: VSI handle to retrieve info from
3885 * @promisc_mask: pointer to mask to be filled in
3886 * @vid: VLAN ID of promisc VLAN VSI
3889 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3892 struct ice_switch_info *sw = hw->switch_info;
3893 struct ice_fltr_mgmt_list_entry *itr;
3894 struct LIST_HEAD_TYPE *rule_head;
3895 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3897 if (!ice_is_vsi_valid(hw, vsi_handle))
3898 return ICE_ERR_PARAM;
3902 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3903 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3905 ice_acquire_lock(rule_lock);
3906 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3908 /* Continue if this filter doesn't apply to this VSI or the
3909 * VSI ID is not in the VSI map for this filter
3911 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3914 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3916 ice_release_lock(rule_lock);
3922 * ice_remove_promisc - Remove promisc based filter rules
3923 * @hw: pointer to the hardware structure
3924 * @recp_id: recipe ID for which the rule needs to removed
3925 * @v_list: list of promisc entries
3927 static enum ice_status
3928 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3929 struct LIST_HEAD_TYPE *v_list)
3931 struct ice_fltr_list_entry *v_list_itr, *tmp;
3933 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3935 v_list_itr->status =
3936 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3937 if (v_list_itr->status)
3938 return v_list_itr->status;
3944 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3945 * @hw: pointer to the hardware structure
3946 * @vsi_handle: VSI handle to clear mode
3947 * @promisc_mask: mask of promiscuous config bits to clear
3948 * @vid: VLAN ID to clear VLAN promiscuous
3951 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3954 struct ice_switch_info *sw = hw->switch_info;
3955 struct ice_fltr_list_entry *fm_entry, *tmp;
3956 struct LIST_HEAD_TYPE remove_list_head;
3957 struct ice_fltr_mgmt_list_entry *itr;
3958 struct LIST_HEAD_TYPE *rule_head;
3959 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3960 enum ice_status status = ICE_SUCCESS;
3963 if (!ice_is_vsi_valid(hw, vsi_handle))
3964 return ICE_ERR_PARAM;
3966 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3967 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3969 recipe_id = ICE_SW_LKUP_PROMISC;
3971 rule_head = &sw->recp_list[recipe_id].filt_rules;
3972 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3974 INIT_LIST_HEAD(&remove_list_head);
3976 ice_acquire_lock(rule_lock);
3977 LIST_FOR_EACH_ENTRY(itr, rule_head,
3978 ice_fltr_mgmt_list_entry, list_entry) {
3979 struct ice_fltr_info *fltr_info;
3980 u8 fltr_promisc_mask = 0;
3982 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3984 fltr_info = &itr->fltr_info;
3986 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3987 vid != fltr_info->l_data.mac_vlan.vlan_id)
3990 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3992 /* Skip if filter is not completely specified by given mask */
3993 if (fltr_promisc_mask & ~promisc_mask)
3996 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4000 ice_release_lock(rule_lock);
4001 goto free_fltr_list;
4004 ice_release_lock(rule_lock);
4006 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4009 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4010 ice_fltr_list_entry, list_entry) {
4011 LIST_DEL(&fm_entry->list_entry);
4012 ice_free(hw, fm_entry);
4019 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4020 * @hw: pointer to the hardware structure
4021 * @vsi_handle: VSI handle to configure
4022 * @promisc_mask: mask of promiscuous config bits
4023 * @vid: VLAN ID to set VLAN promiscuous
4026 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4028 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4029 struct ice_fltr_list_entry f_list_entry;
4030 struct ice_fltr_info new_fltr;
4031 enum ice_status status = ICE_SUCCESS;
4037 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4039 if (!ice_is_vsi_valid(hw, vsi_handle))
4040 return ICE_ERR_PARAM;
4041 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4043 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4045 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4046 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4047 new_fltr.l_data.mac_vlan.vlan_id = vid;
4048 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4050 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4051 recipe_id = ICE_SW_LKUP_PROMISC;
4054 /* Separate filters must be set for each direction/packet type
4055 * combination, so we will loop over the mask value, store the
4056 * individual type, and clear it out in the input mask as it
4059 while (promisc_mask) {
4065 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4066 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4067 pkt_type = UCAST_FLTR;
4068 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4069 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4070 pkt_type = UCAST_FLTR;
4072 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4073 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4074 pkt_type = MCAST_FLTR;
4075 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4076 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4077 pkt_type = MCAST_FLTR;
4079 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4080 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4081 pkt_type = BCAST_FLTR;
4082 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4083 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4084 pkt_type = BCAST_FLTR;
4088 /* Check for VLAN promiscuous flag */
4089 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4090 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4091 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4092 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4096 /* Set filter DA based on packet type */
4097 mac_addr = new_fltr.l_data.mac.mac_addr;
4098 if (pkt_type == BCAST_FLTR) {
4099 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4100 } else if (pkt_type == MCAST_FLTR ||
4101 pkt_type == UCAST_FLTR) {
4102 /* Use the dummy ether header DA */
4103 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4104 ICE_NONDMA_TO_NONDMA);
4105 if (pkt_type == MCAST_FLTR)
4106 mac_addr[0] |= 0x1; /* Set multicast bit */
4109 /* Need to reset this to zero for all iterations */
4112 new_fltr.flag |= ICE_FLTR_TX;
4113 new_fltr.src = hw_vsi_id;
4115 new_fltr.flag |= ICE_FLTR_RX;
4116 new_fltr.src = hw->port_info->lport;
4119 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4120 new_fltr.vsi_handle = vsi_handle;
4121 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4122 f_list_entry.fltr_info = new_fltr;
4124 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4125 if (status != ICE_SUCCESS)
4126 goto set_promisc_exit;
4134 * ice_set_vlan_vsi_promisc
4135 * @hw: pointer to the hardware structure
4136 * @vsi_handle: VSI handle to configure
4137 * @promisc_mask: mask of promiscuous config bits
4138 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4140 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4143 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4144 bool rm_vlan_promisc)
4146 struct ice_switch_info *sw = hw->switch_info;
4147 struct ice_fltr_list_entry *list_itr, *tmp;
4148 struct LIST_HEAD_TYPE vsi_list_head;
4149 struct LIST_HEAD_TYPE *vlan_head;
4150 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4151 enum ice_status status;
4154 INIT_LIST_HEAD(&vsi_list_head);
4155 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4156 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4157 ice_acquire_lock(vlan_lock);
4158 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4160 ice_release_lock(vlan_lock);
4162 goto free_fltr_list;
4164 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4166 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4167 if (rm_vlan_promisc)
4168 status = ice_clear_vsi_promisc(hw, vsi_handle,
4169 promisc_mask, vlan_id);
4171 status = ice_set_vsi_promisc(hw, vsi_handle,
4172 promisc_mask, vlan_id);
4178 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4179 ice_fltr_list_entry, list_entry) {
4180 LIST_DEL(&list_itr->list_entry);
4181 ice_free(hw, list_itr);
4187 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4188 * @hw: pointer to the hardware structure
4189 * @vsi_handle: VSI handle to remove filters from
4190 * @lkup: switch rule filter lookup type
4193 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4194 enum ice_sw_lkup_type lkup)
4196 struct ice_switch_info *sw = hw->switch_info;
4197 struct ice_fltr_list_entry *fm_entry;
4198 struct LIST_HEAD_TYPE remove_list_head;
4199 struct LIST_HEAD_TYPE *rule_head;
4200 struct ice_fltr_list_entry *tmp;
4201 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4202 enum ice_status status;
4204 INIT_LIST_HEAD(&remove_list_head);
4205 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4206 rule_head = &sw->recp_list[lkup].filt_rules;
4207 ice_acquire_lock(rule_lock);
4208 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4210 ice_release_lock(rule_lock);
4215 case ICE_SW_LKUP_MAC:
4216 ice_remove_mac(hw, &remove_list_head);
4218 case ICE_SW_LKUP_VLAN:
4219 ice_remove_vlan(hw, &remove_list_head);
4221 case ICE_SW_LKUP_PROMISC:
4222 case ICE_SW_LKUP_PROMISC_VLAN:
4223 ice_remove_promisc(hw, lkup, &remove_list_head);
4225 case ICE_SW_LKUP_MAC_VLAN:
4226 ice_remove_mac_vlan(hw, &remove_list_head);
4228 case ICE_SW_LKUP_ETHERTYPE:
4229 case ICE_SW_LKUP_ETHERTYPE_MAC:
4230 ice_remove_eth_mac(hw, &remove_list_head);
4232 case ICE_SW_LKUP_DFLT:
4233 ice_debug(hw, ICE_DBG_SW,
4234 "Remove filters for this lookup type hasn't been implemented yet\n");
4236 case ICE_SW_LKUP_LAST:
4237 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4241 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4242 ice_fltr_list_entry, list_entry) {
4243 LIST_DEL(&fm_entry->list_entry);
4244 ice_free(hw, fm_entry);
4249 * ice_remove_vsi_fltr - Remove all filters for a VSI
4250 * @hw: pointer to the hardware structure
4251 * @vsi_handle: VSI handle to remove filters from
4253 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4255 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4257 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4258 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4259 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4260 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4261 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4262 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4263 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4264 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4268 * ice_alloc_res_cntr - allocating resource counter
4269 * @hw: pointer to the hardware structure
4270 * @type: type of resource
4271 * @alloc_shared: if set it is shared else dedicated
4272 * @num_items: number of entries requested for FD resource type
4273 * @counter_id: counter index returned by AQ call
4276 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4279 struct ice_aqc_alloc_free_res_elem *buf;
4280 enum ice_status status;
4283 /* Allocate resource */
4284 buf_len = sizeof(*buf);
4285 buf = (struct ice_aqc_alloc_free_res_elem *)
4286 ice_malloc(hw, buf_len);
4288 return ICE_ERR_NO_MEMORY;
4290 buf->num_elems = CPU_TO_LE16(num_items);
4291 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4292 ICE_AQC_RES_TYPE_M) | alloc_shared);
4294 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4295 ice_aqc_opc_alloc_res, NULL);
4299 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4307 * ice_free_res_cntr - free resource counter
4308 * @hw: pointer to the hardware structure
4309 * @type: type of resource
4310 * @alloc_shared: if set it is shared else dedicated
4311 * @num_items: number of entries to be freed for FD resource type
4312 * @counter_id: counter ID resource which needs to be freed
4315 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4318 struct ice_aqc_alloc_free_res_elem *buf;
4319 enum ice_status status;
4323 buf_len = sizeof(*buf);
4324 buf = (struct ice_aqc_alloc_free_res_elem *)
4325 ice_malloc(hw, buf_len);
4327 return ICE_ERR_NO_MEMORY;
4329 buf->num_elems = CPU_TO_LE16(num_items);
4330 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4331 ICE_AQC_RES_TYPE_M) | alloc_shared);
4332 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4334 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4335 ice_aqc_opc_free_res, NULL);
4337 ice_debug(hw, ICE_DBG_SW,
4338 "counter resource could not be freed\n");
4345 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4346 * @hw: pointer to the hardware structure
4347 * @counter_id: returns counter index
4349 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4351 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4352 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4357 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4358 * @hw: pointer to the hardware structure
4359 * @counter_id: counter index to be freed
4361 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4363 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4364 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4369 * ice_alloc_res_lg_act - add large action resource
4370 * @hw: pointer to the hardware structure
4371 * @l_id: large action ID to fill it in
4372 * @num_acts: number of actions to hold with a large action entry
4374 static enum ice_status
4375 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4377 struct ice_aqc_alloc_free_res_elem *sw_buf;
4378 enum ice_status status;
4381 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4382 return ICE_ERR_PARAM;
4384 /* Allocate resource for large action */
4385 buf_len = sizeof(*sw_buf);
4386 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4387 ice_malloc(hw, buf_len);
4389 return ICE_ERR_NO_MEMORY;
4391 sw_buf->num_elems = CPU_TO_LE16(1);
4393 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4394 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4395 * If num_acts is greater than 2, then use
4396 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4397 * The num_acts cannot exceed 4. This was ensured at the
4398 * beginning of the function.
4401 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4402 else if (num_acts == 2)
4403 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4405 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4407 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4408 ice_aqc_opc_alloc_res, NULL);
4410 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4412 ice_free(hw, sw_buf);
4417 * ice_add_mac_with_sw_marker - add filter with sw marker
4418 * @hw: pointer to the hardware structure
4419 * @f_info: filter info structure containing the MAC filter information
4420 * @sw_marker: sw marker to tag the Rx descriptor with
4423 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4426 struct ice_switch_info *sw = hw->switch_info;
4427 struct ice_fltr_mgmt_list_entry *m_entry;
4428 struct ice_fltr_list_entry fl_info;
4429 struct LIST_HEAD_TYPE l_head;
4430 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4431 enum ice_status ret;
4435 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4436 return ICE_ERR_PARAM;
4438 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4439 return ICE_ERR_PARAM;
4441 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4442 return ICE_ERR_PARAM;
4444 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4445 return ICE_ERR_PARAM;
4446 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4448 /* Add filter if it doesn't exist so then the adding of large
4449 * action always results in update
4452 INIT_LIST_HEAD(&l_head);
4453 fl_info.fltr_info = *f_info;
4454 LIST_ADD(&fl_info.list_entry, &l_head);
4456 entry_exists = false;
4457 ret = ice_add_mac(hw, &l_head);
4458 if (ret == ICE_ERR_ALREADY_EXISTS)
4459 entry_exists = true;
4463 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4464 ice_acquire_lock(rule_lock);
4465 /* Get the book keeping entry for the filter */
4466 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4470 /* If counter action was enabled for this rule then don't enable
4471 * sw marker large action
4473 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4474 ret = ICE_ERR_PARAM;
4478 /* if same marker was added before */
4479 if (m_entry->sw_marker_id == sw_marker) {
4480 ret = ICE_ERR_ALREADY_EXISTS;
4484 /* Allocate a hardware table entry to hold large act. Three actions
4485 * for marker based large action
4487 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4491 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4494 /* Update the switch rule to add the marker action */
4495 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4497 ice_release_lock(rule_lock);
4502 ice_release_lock(rule_lock);
4503 /* only remove entry if it did not exist previously */
4505 ret = ice_remove_mac(hw, &l_head);
/* NOTE(review): this listing is incomplete — gaps in the embedded line
 * numbering show that braces, goto statements, error labels and some
 * declarations (return type line, `counter_id`, `lg_act_id`,
 * `entry_exist`) are missing from view. Do not infer control flow beyond
 * what the visible lines show.
 *
 * Flow as visible: validate the filter (must be ICE_FWD_TO_VSI,
 * ICE_SW_LKUP_MAC, valid VSI handle), add the MAC filter first so a
 * bookkeeping entry exists, then under the MAC rule lock allocate a VLAN
 * counter and a 2-action large action and attach the counter action to
 * the rule. On failure, the MAC filter is removed only if it did not
 * exist before this call.
 */
4511 * ice_add_mac_with_counter - add filter with counter enabled
4512 * @hw: pointer to the hardware structure
4513 * @f_info: pointer to filter info structure containing the MAC filter
4517 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4519 struct ice_switch_info *sw = hw->switch_info;
4520 struct ice_fltr_mgmt_list_entry *m_entry;
4521 struct ice_fltr_list_entry fl_info;
4522 struct LIST_HEAD_TYPE l_head;
4523 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4524 enum ice_status ret;
/* Parameter validation: only VSI-forwarding MAC filters are supported. */
4529 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4530 return ICE_ERR_PARAM;
4532 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4533 return ICE_ERR_PARAM;
4535 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4536 return ICE_ERR_PARAM;
4537 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
/* NOTE(review): `entry_exist` declaration is not visible in this listing;
 * presumably a local bool — confirm against the full source.
 */
4539 entry_exist = false;
4541 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4543 /* Add filter if it doesn't exist so then the adding of large
4544 * action always results in update
4546 INIT_LIST_HEAD(&l_head);
4548 fl_info.fltr_info = *f_info;
4549 LIST_ADD(&fl_info.list_entry, &l_head);
4551 ret = ice_add_mac(hw, &l_head);
4552 if (ret == ICE_ERR_ALREADY_EXISTS)
4557 ice_acquire_lock(rule_lock);
4558 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
/* Lookup just added (or pre-existing) rule; failure here is unexpected. */
4560 ret = ICE_ERR_BAD_PTR;
4564 /* Don't enable counter for a filter for which sw marker was enabled */
4565 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4566 ret = ICE_ERR_PARAM;
4570 /* If a counter was already enabled then don't need to add again */
4571 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4572 ret = ICE_ERR_ALREADY_EXISTS;
4576 /* Allocate a hardware table entry to VLAN counter */
4577 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4581 /* Allocate a hardware table entry to hold large act. Two actions for
4582 * counter based large action
4584 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4588 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4591 /* Update the switch rule to add the counter action */
4592 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4594 ice_release_lock(rule_lock);
/* Error path: release lock, then undo the ice_add_mac() done above. */
4599 ice_release_lock(rule_lock);
4600 /* only remove entry if it did not exist previously */
4602 ret = ice_remove_mac(hw, &l_head);
4607 /* This is mapping table entry that maps every word within a given protocol
4608 * structure to the real byte offset as per the specification of that
4610 * for example dst address is 3 words in ethertype header and corresponding
4611 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4612 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4613 * matching entry describing its field. This needs to be updated if new
4614 * structure is added to that union.
4616 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
/* Each entry: protocol type, then the byte offset of every 16-bit word
 * of that protocol header within the packet (tunnel headers such as
 * VXLAN/GENEVE start their match words at byte 8, i.e. past the UDP
 * header fields they do not match on).
 */
4617 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4618 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4619 { ICE_ETYPE_OL, { 0 } },
4620 { ICE_VLAN_OFOS, { 0, 2 } },
4621 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4622 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4623 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4624 26, 28, 30, 32, 34, 36, 38 } },
4625 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4626 26, 28, 30, 32, 34, 36, 38 } },
4627 { ICE_TCP_IL, { 0, 2 } },
4628 { ICE_UDP_OF, { 0, 2 } },
4629 { ICE_UDP_ILOS, { 0, 2 } },
4630 { ICE_SCTP_IL, { 0, 2 } },
4631 { ICE_VXLAN, { 8, 10, 12, 14 } },
4632 { ICE_GENEVE, { 8, 10, 12, 14 } },
4633 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4634 { ICE_NVGRE, { 0, 2, 4, 6 } },
4635 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4636 { ICE_PPPOE, { 0, 2, 4, 6 } },
/* NOTE(review): the closing "};" of this initializer is not visible in
 * this listing (line gap after the last entry).
 */
4639 /* The following table describes preferred grouping of recipes.
4640 * If a recipe that needs to be programmed is a superset or matches one of the
4641 * following combinations, then the recipe needs to be chained as per the
/* Maps the software protocol-type enum to the hardware protocol ID used
 * in field vectors. Note that all UDP-based tunnels (VXLAN, GENEVE,
 * VXLAN-GPE, GTP) share the outer-UDP hardware ID ICE_UDP_OF_HW.
 */
4645 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4646 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4647 { ICE_MAC_IL, ICE_MAC_IL_HW },
4648 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4649 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4650 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4651 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4652 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4653 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4654 { ICE_TCP_IL, ICE_TCP_IL_HW },
4655 { ICE_UDP_OF, ICE_UDP_OF_HW },
4656 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4657 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4658 { ICE_VXLAN, ICE_UDP_OF_HW },
4659 { ICE_GENEVE, ICE_UDP_OF_HW },
4660 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4661 { ICE_NVGRE, ICE_GRE_OF_HW },
4662 { ICE_GTP, ICE_UDP_OF_HW },
4663 { ICE_PPPOE, ICE_PPPOE_HW },
/* NOTE(review): the closing "};" is not visible in this listing. */
/* NOTE(review): incomplete listing — loop-variable declarations, the
 * `found` flag logic, `continue`/`break` statements and several closing
 * braces fall in line-number gaps and are not visible here.
 */
4667 * ice_find_recp - find a recipe
4668 * @hw: pointer to the hardware structure
4669 * @lkup_exts: extension sequence to match
4671 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4673 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4675 bool refresh_required = true;
4676 struct ice_sw_recipe *recp;
4679 /* Walk through existing recipes to find a match */
4680 recp = hw->switch_info->recp_list;
4681 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4682 /* If recipe was not created for this ID, in SW bookkeeping,
4683 * check if FW has an entry for this recipe. If the FW has an
4684 * entry update it in our SW bookkeeping and continue with the
4687 if (!recp[i].recp_created)
4688 if (ice_get_recp_frm_fw(hw,
4689 hw->switch_info->recp_list, i,
4693 /* Skip inverse action recipes */
4694 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4695 ICE_AQ_RECIPE_ACT_INV_ACT)
4698 /* if number of words we are looking for match */
4699 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4700 struct ice_fv_word *a = lkup_exts->fv_words;
4701 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive match: for each requested word "p", scan the
 * recipe's words "q" for the same (protocol, offset) pair.
 */
4705 for (p = 0; p < lkup_exts->n_val_words; p++) {
4706 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4708 if (a[p].off == b[q].off &&
4709 a[p].prot_id == b[q].prot_id)
4710 /* Found the "p"th word in the
4715 /* After walking through all the words in the
4716 * "i"th recipe if "p"th word was not found then
4717 * this recipe is not what we are looking for.
4718 * So break out from this loop and try the next
4721 if (q >= recp[i].lkup_exts.n_val_words) {
4726 /* If for "i"th recipe the found was never set to false
4727 * then it means we found our match
4730 return i; /* Return the recipe ID */
4733 return ICE_MAX_NUM_RECIPES;
4737 * ice_prot_type_to_id - get protocol ID from protocol type
4738 * @type: protocol type
4739 * @id: pointer to variable that will receive the ID
4741 * Returns true if found, false otherwise
4743 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* NOTE(review): the loop terminates on an ICE_PROTOCOL_LAST sentinel
 * entry, but the visible ice_prot_id_tbl initializer above does not show
 * such an entry — confirm a terminator exists (or that zero-initialized
 * tail entries terminate it), otherwise this scan can read out of bounds.
 */
4747 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4748 if (ice_prot_id_tbl[i].type == type) {
4749 *id = ice_prot_id_tbl[i].protocol_id;
/* NOTE(review): the `return true;` / `return false;` lines fall in the
 * line-number gap and are not visible here.
 */
4756 * ice_fill_valid_words - count valid words
4757 * @rule: advanced rule with lookup information
4758 * @lkup_exts: byte offset extractions of the words that are valid
4760 * calculate valid words in a lookup rule using mask value
/* (Comment title above corrected from "ice_find_valid_words" to match
 * the actual function name. Returns the number of words appended, i.e.
 * word - lkup_exts->n_val_words at entry; return-type line not visible.)
 */
4763 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4764 struct ice_prot_lkup_ext *lkup_exts)
4770 if (!ice_prot_type_to_id(rule->type, &prot_id))
4773 word = lkup_exts->n_val_words;
/* Walk every 16-bit word of the match mask; each non-zero mask word
 * becomes one extraction entry (protocol ID + byte offset + mask).
 */
4775 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4776 if (((u16 *)&rule->m_u)[j] &&
4777 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4778 /* No more space to accommodate */
4779 if (word >= ICE_MAX_CHAIN_WORDS)
4781 lkup_exts->fv_words[word].off =
4782 ice_prot_ext[rule->type].offs[j];
/* NOTE(review): indexes ice_prot_id_tbl directly by enum value, which
 * assumes the table is ordered to match ice_protocol_type — confirm.
 */
4783 lkup_exts->fv_words[word].prot_id =
4784 ice_prot_id_tbl[rule->type].protocol_id;
4785 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4789 ret_val = word - lkup_exts->n_val_words;
4790 lkup_exts->n_val_words = word;
4796 * ice_create_first_fit_recp_def - Create a recipe grouping
4797 * @hw: pointer to the hardware structure
4798 * @lkup_exts: an array of protocol header extractions
4799 * @rg_list: pointer to a list that stores new recipe groups
4800 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4802 * Using first fit algorithm, take all the words that are still not done
4803 * and start grouping them in 4-word groups. Each group makes up one
4806 static enum ice_status
4807 ice_create_first_fit_recp_def(struct ice_hw *hw,
4808 struct ice_prot_lkup_ext *lkup_exts,
4809 struct LIST_HEAD_TYPE *rg_list,
/* NOTE(review): the @recp_cnt parameter line and the function's opening
 * brace fall in a line-number gap and are not visible here.
 */
4812 struct ice_pref_recipe_group *grp = NULL;
4817 /* Walk through every word in the rule to check if it is not done. If so
4818 * then this word needs to be part of a new recipe.
4820 for (j = 0; j < lkup_exts->n_val_words; j++)
4821 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one is full
 * (ICE_NUM_WORDS_RECIPE pairs); each group becomes one recipe entry.
 */
4823 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4824 struct ice_recp_grp_entry *entry;
4826 entry = (struct ice_recp_grp_entry *)
4827 ice_malloc(hw, sizeof(*entry));
4829 return ICE_ERR_NO_MEMORY;
4830 LIST_ADD(&entry->l_entry, rg_list);
4831 grp = &entry->r_group;
/* Append the (protocol, offset, mask) triple to the current group. */
4835 grp->pairs[grp->n_val_pairs].prot_id =
4836 lkup_exts->fv_words[j].prot_id;
4837 grp->pairs[grp->n_val_pairs].off =
4838 lkup_exts->fv_words[j].off;
4839 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4847 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4848 * @hw: pointer to the hardware structure
4849 * @fv_list: field vector with the extraction sequence information
4850 * @rg_list: recipe groupings with protocol-offset pairs
4852 * Helper function to fill in the field vector indices for protocol-offset
4853 * pairs. These indexes are then ultimately programmed into a recipe.
4855 static enum ice_status
4856 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4857 struct LIST_HEAD_TYPE *rg_list)
4859 struct ice_sw_fv_list_entry *fv;
4860 struct ice_recp_grp_entry *rg;
4861 struct ice_fv_word *fv_ext;
4862 /* An empty field-vector list means nothing to resolve; early return
4863 * (returned status line not visible in this listing). */
4863 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted for indices. */
4866 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4867 fv_ext = fv->fv_ptr->ew;
4869 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4872 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4873 struct ice_fv_word *pr;
4878 pr = &rg->r_group.pairs[i];
4879 mask = rg->r_group.mask[i];
/* Linear scan of the extraction words to find the matching
 * (protocol, offset) pair; es.fvw is the field-vector width.
 */
4881 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4882 if (fv_ext[j].prot_id == pr->prot_id &&
4883 fv_ext[j].off == pr->off) {
4886 /* Store index of field vector */
4888 /* Mask is given by caller as big
4889 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask (big endian -> little endian for FW). */
4892 rg->fv_mask[i] = mask << 8 | mask >> 8;
4896 /* Protocol/offset could not be found, caller gave an
4900 return ICE_ERR_PARAM;
4908 * ice_find_free_recp_res_idx - find free result indexes for recipe
4909 * @hw: pointer to hardware structure
4910 * @profiles: bitmap of profiles that will be associated with the new recipe
4911 * @free_idx: pointer to variable to receive the free index bitmap
4913 * The algorithm used here is:
4914 * 1. When creating a new recipe, create a set P which contains all
4915 * Profiles that will be associated with our new recipe
4917 * 2. For each Profile p in set P:
4918 * a. Add all recipes associated with Profile p into set R
4919 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4920 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4921 * i. Or just assume they all have the same possible indexes:
4923 * i.e., PossibleIndexes = 0x0000F00000000000
4925 * 3. For each Recipe r in set R:
4926 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4927 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4929 * FreeIndexes will contain the bits indicating the indexes free for use,
4930 * then the code needs to update the recipe[r].used_result_idx_bits to
4931 * indicate which indexes were selected for use by this recipe.
/* (Return-type line not visible; the function returns the count of free
 * indexes, accumulated in the final loop below.) */
4934 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4935 ice_bitmap_t *free_idx)
4937 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4938 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4939 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4943 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4944 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4945 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4946 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all indexes possible"; profile constraints are ANDed in. */
4948 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
4949 ice_set_bit(count, possible_idx);
4951 /* For each profile we are going to associate the recipe with, add the
4952 * recipes that are associated with that profile. This will give us
4953 * the set of recipes that our recipe may collide with. Also, determine
4954 * what possible result indexes are usable given this set of profiles.
4957 while (ICE_MAX_NUM_PROFILES >
4958 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4959 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4960 ICE_MAX_NUM_RECIPES);
4961 ice_and_bitmap(possible_idx, possible_idx,
4962 hw->switch_info->prof_res_bm[bit],
4967 /* For each recipe that our new recipe may collide with, determine
4968 * which indexes have been used.
4970 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4971 if (ice_is_bit_set(recipes, bit)) {
4972 ice_or_bitmap(used_idx, used_idx,
4973 hw->switch_info->recp_list[bit].res_idxs,
/* free = used XOR possible: used indexes are always a subset of the
 * possible set, so XOR yields possible-but-unused bits.
 */
4977 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4979 /* return number of free indexes */
4982 while (ICE_MAX_FV_WORDS >
4983 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
/* NOTE(review): long function with many lines missing from this listing
 * (error labels, several closing braces, local declarations such as
 * `recps`, `rid`, `free_res_idx`). Comments below describe only what the
 * visible lines establish.
 */
4992 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4993 * @hw: pointer to hardware structure
4994 * @rm: recipe management list entry
4995 * @match_tun: if field vector index for tunnel needs to be programmed
4996 * @profiles: bitmap of profiles that will be assocated.
4998 static enum ice_status
4999 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5000 bool match_tun, ice_bitmap_t *profiles)
5002 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5003 struct ice_aqc_recipe_data_elem *tmp;
5004 struct ice_aqc_recipe_data_elem *buf;
5005 struct ice_recp_grp_entry *entry;
5006 enum ice_status status;
5012 /* When more than one recipe are required, another recipe is needed to
5013 * chain them together. Matching a tunnel metadata ID takes up one of
5014 * the match fields in the chaining recipe reducing the number of
5015 * chained recipes by one.
5017 /* check number of free result indices */
5018 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5019 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm)
5021 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5022 free_res_idx, rm->n_grp_count);
/* Chained (multi-group) recipes need one free result index per group. */
5024 if (rm->n_grp_count > 1) {
5025 if (rm->n_grp_count > free_res_idx)
5026 return ICE_ERR_MAX_LIMIT;
/* `tmp` holds recipes read back from FW; `buf` is the add-recipe
 * request buffer, one element per recipe group.
 */
5031 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5032 ICE_MAX_NUM_RECIPES,
5035 return ICE_ERR_NO_MEMORY;
5037 buf = (struct ice_aqc_recipe_data_elem *)
5038 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5040 status = ICE_ERR_NO_MEMORY;
5044 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5045 recipe_count = ICE_MAX_NUM_RECIPES;
5046 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5048 if (status || recipe_count == 0)
5051 /* Allocate the recipe resources, and configure them according to the
5052 * match fields from protocol headers and extracted field vectors.
5054 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5055 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5058 status = ice_alloc_recipe(hw, &entry->rid);
5062 /* Clear the result index of the located recipe, as this will be
5063 * updated, if needed, later in the recipe creation process.
5065 tmp[0].content.result_indx = 0;
/* Use the first FW recipe element as a template for this group. */
5067 buf[recps] = tmp[0];
5068 buf[recps].recipe_indx = (u8)entry->rid;
5069 /* if the recipe is a non-root recipe RID should be programmed
5070 * as 0 for the rules to be applied correctly.
5072 buf[recps].content.rid = 0;
5073 ice_memset(&buf[recps].content.lkup_indx, 0,
5074 sizeof(buf[recps].content.lkup_indx),
5077 /* All recipes use look-up index 0 to match switch ID. */
5078 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5079 buf[recps].content.mask[0] =
5080 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5081 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5084 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5085 buf[recps].content.lkup_indx[i] = 0x80;
5086 buf[recps].content.mask[i] = 0;
/* Program the actual match words (fv index + mask) for this group. */
5089 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5090 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5091 buf[recps].content.mask[i + 1] =
5092 CPU_TO_LE16(entry->fv_mask[i]);
5095 if (rm->n_grp_count > 1) {
5096 /* Checks to see if there really is a valid result index
5099 if (chain_idx >= ICE_MAX_FV_WORDS) {
5100 ice_debug(hw, ICE_DBG_SW,
5101 "No chain index available\n");
5102 status = ICE_ERR_MAX_LIMIT;
/* Assign this group a result index so the chaining (root) recipe
 * can match on its outcome; consume the index from the free bitmap.
 */
5106 entry->chain_idx = chain_idx;
5107 buf[recps].content.result_indx =
5108 ICE_AQ_RECIPE_RESULT_EN |
5109 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5110 ICE_AQ_RECIPE_RESULT_DATA_M);
5111 ice_clear_bit(chain_idx, result_idx_bm);
5112 chain_idx = ice_find_first_bit(result_idx_bm,
5116 /* fill recipe dependencies */
5117 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5118 ICE_MAX_NUM_RECIPES);
5119 ice_set_bit(buf[recps].recipe_indx,
5120 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5121 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is itself the root. */
5125 if (rm->n_grp_count == 1) {
5126 rm->root_rid = buf[0].recipe_indx;
5127 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5128 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5129 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5130 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5131 sizeof(buf[0].recipe_bitmap),
5132 ICE_NONDMA_TO_NONDMA);
5134 status = ICE_ERR_BAD_PTR;
5137 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5138 * the recipe which is getting created if specified
5139 * by user. Usually any advanced switch filter, which results
5140 * into new extraction sequence, ended up creating a new recipe
5141 * of type ROOT and usually recipes are associated with profiles
5142 * Switch rule referreing newly created recipe, needs to have
5143 * either/or 'fwd' or 'join' priority, otherwise switch rule
5144 * evaluation will not happen correctly. In other words, if
5145 * switch rule to be evaluated on priority basis, then recipe
5146 * needs to have priority, otherwise it will be evaluated last.
5148 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: allocate one extra root recipe that chains the
 * per-group results together.
 */
5150 struct ice_recp_grp_entry *last_chain_entry;
5153 /* Allocate the last recipe that will chain the outcomes of the
5154 * other recipes together
5156 status = ice_alloc_recipe(hw, &rid);
5160 buf[recps].recipe_indx = (u8)rid;
5161 buf[recps].content.rid = (u8)rid;
5162 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5163 /* the new entry created should also be part of rg_list to
5164 * make sure we have complete recipe
5166 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5167 sizeof(*last_chain_entry));
5168 if (!last_chain_entry) {
5169 status = ICE_ERR_NO_MEMORY;
5172 last_chain_entry->rid = rid;
5173 ice_memset(&buf[recps].content.lkup_indx, 0,
5174 sizeof(buf[recps].content.lkup_indx),
5176 /* All recipes use look-up index 0 to match switch ID. */
5177 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5178 buf[recps].content.mask[0] =
5179 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5180 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5181 buf[recps].content.lkup_indx[i] =
5182 ICE_AQ_RECIPE_LKUP_IGNORE;
5183 buf[recps].content.mask[i] = 0;
5187 /* update r_bitmap with the recp that is used for chaining */
5188 ice_set_bit(rid, rm->r_bitmap);
5189 /* this is the recipe that chains all the other recipes so it
5190 * should not have a chaining ID to indicate the same
5192 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Point the root recipe's lookup words at each group's result index. */
5193 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5195 last_chain_entry->fv_idx[i] = entry->chain_idx;
5196 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5197 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5198 ice_set_bit(entry->rid, rm->r_bitmap);
5200 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5201 if (sizeof(buf[recps].recipe_bitmap) >=
5202 sizeof(rm->r_bitmap)) {
5203 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5204 sizeof(buf[recps].recipe_bitmap),
5205 ICE_NONDMA_TO_NONDMA);
5207 status = ICE_ERR_BAD_PTR;
5210 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5212 /* To differentiate among different UDP tunnels, a meta data ID
/* (match_tun path: add the tunnel-flag metadata word to the root.) */
5216 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5217 buf[recps].content.mask[i] =
5218 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5222 rm->root_rid = (u8)rid;
/* Program the recipes into FW under the change lock. */
5224 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5228 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5229 ice_release_change_lock(hw);
5233 /* Every recipe that just got created add it to the recipe
5236 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5237 struct ice_switch_info *sw = hw->switch_info;
5238 bool is_root, idx_found = false;
5239 struct ice_sw_recipe *recp;
5240 u16 idx, buf_idx = 0;
5242 /* find buffer index for copying some data */
5243 for (idx = 0; idx < rm->n_grp_count; idx++)
5244 if (buf[idx].recipe_indx == entry->rid) {
5250 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping (recp_list). */
5254 recp = &sw->recp_list[entry->rid];
5255 is_root = (rm->root_rid == entry->rid);
5256 recp->is_root = is_root;
5258 recp->root_rid = entry->rid;
5259 recp->big_recp = (is_root && rm->n_grp_count > 1);
5261 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5262 entry->r_group.n_val_pairs *
5263 sizeof(struct ice_fv_word),
5264 ICE_NONDMA_TO_NONDMA);
5266 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5267 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5269 /* Copy non-result fv index values and masks to recipe. This
5270 * call will also update the result recipe bitmask.
5272 ice_collect_result_idx(&buf[buf_idx], recp);
5274 /* for non-root recipes, also copy to the root, this allows
5275 * easier matching of a complete chained recipe
5278 ice_collect_result_idx(&buf[buf_idx],
5279 &sw->recp_list[rm->root_rid]);
5281 recp->n_ext_words = entry->r_group.n_val_pairs;
5282 recp->chain_idx = entry->chain_idx;
5283 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5284 recp->n_grp_count = rm->n_grp_count;
5285 recp->tun_type = rm->tun_type;
5286 recp->recp_created = true;
5301 * ice_create_recipe_group - creates recipe group
5302 * @hw: pointer to hardware structure
5303 * @rm: recipe management list entry
5304 * @lkup_exts: lookup elements
/* Groups the not-yet-done lookup words into recipe groups (first-fit, via
 * ice_create_first_fit_recp_def) and copies the word/mask arrays into the
 * recipe-management entry. Return statement not visible in this listing.
 */
5306 static enum ice_status
5307 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5308 struct ice_prot_lkup_ext *lkup_exts)
5310 enum ice_status status;
5313 rm->n_grp_count = 0;
5315 /* Create recipes for words that are marked not done by packing them
5318 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5319 &rm->rg_list, &recp_count);
5321 rm->n_grp_count += recp_count;
5322 rm->n_ext_words = lkup_exts->n_val_words;
5323 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5324 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5325 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5326 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5333 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5334 * @hw: pointer to hardware structure
5335 * @lkups: lookup elements or match criteria for the advanced recipe, one
5336 * structure per protocol header
5337 * @lkups_cnt: number of protocols
5338 * @bm: bitmap of field vectors to consider
5339 * @fv_list: pointer to a list that holds the returned field vectors
5341 static enum ice_status
5342 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5343 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5345 enum ice_status status;
/* Translate each SW protocol type to its HW protocol ID before querying
 * the field-vector list.
 */
5349 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5351 return ICE_ERR_NO_MEMORY;
5353 for (i = 0; i < lkups_cnt; i++)
5354 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5355 status = ICE_ERR_CFG;
5359 /* Find field vectors that include all specified protocol types */
5360 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* Cleanup label path: prot_ids is freed on both success and failure. */
5363 ice_free(hw, prot_ids);
5368 * ice_add_special_words - Add words that are not protocols, such as metadata
5369 * @rinfo: other information regarding the rule e.g. priority and action info
5370 * @lkup_exts: lookup word structure
5372 static enum ice_status
5373 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5374 struct ice_prot_lkup_ext *lkup_exts)
5376 /* If this is a tunneled packet, then add recipe index to match the
5377 * tunnel bit in the packet metadata flags.
5379 if (rinfo->tun_type != ICE_NON_TUN) {
5380 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word (protocol ICE_META_DATA_ID_HW at the
 * tunnel-flag MDID offset) matching the tunnel flag bit.
 */
5381 u8 word = lkup_exts->n_val_words++;
5383 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5384 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5386 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5388 return ICE_ERR_MAX_LIMIT;
/* (Success return falls in a line-number gap and is not visible.) */
5395 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5396 * @hw: pointer to hardware structure
5397 * @rinfo: other information regarding the rule e.g. priority and action info
5398 * @bm: pointer to memory for returning the bitmap of field vectors
/* Maps the rule's tunnel type to a profile class, then fills @bm with the
 * field vectors of that class. NOTE(review): `break` statements between
 * the case groups fall in line-number gaps and are not visible here.
 */
5401 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5404 enum ice_prof_type type;
5406 switch (rinfo->tun_type) {
5408 type = ICE_PROF_NON_TUN;
5410 case ICE_ALL_TUNNELS:
5411 type = ICE_PROF_TUN_ALL;
5413 case ICE_SW_TUN_VXLAN_GPE:
5414 case ICE_SW_TUN_GENEVE:
5415 case ICE_SW_TUN_VXLAN:
5416 case ICE_SW_TUN_UDP:
5417 case ICE_SW_TUN_GTP:
5418 type = ICE_PROF_TUN_UDP;
5420 case ICE_SW_TUN_NVGRE:
5421 type = ICE_PROF_TUN_GRE;
5423 case ICE_SW_TUN_PPPOE:
5424 type = ICE_PROF_TUN_PPPOE;
5426 case ICE_SW_TUN_AND_NON_TUN:
/* Default (and tun-and-non-tun) falls back to all profiles. */
5428 type = ICE_PROF_ALL;
5432 ice_get_sw_fv_bitmap(hw, type, bm);
/* NOTE(review): several lines of this function (error labels, closing
 * braces, some local declarations like `i`, `j`, `count`) fall in
 * line-number gaps. Comments describe only the visible logic.
 */
5436 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5437 * @hw: pointer to hardware structure
5438 * @lkups: lookup elements or match criteria for the advanced recipe, one
5439 * structure per protocol header
5440 * @lkups_cnt: number of protocols
5441 * @rinfo: other information regarding the rule e.g. priority and action info
5442 * @rid: return the recipe ID of the recipe created
5444 static enum ice_status
5445 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5446 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5448 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5449 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5450 struct ice_prot_lkup_ext *lkup_exts;
5451 struct ice_recp_grp_entry *r_entry;
5452 struct ice_sw_fv_list_entry *fvit;
5453 struct ice_recp_grp_entry *r_tmp;
5454 struct ice_sw_fv_list_entry *tmp;
5455 enum ice_status status = ICE_SUCCESS;
5456 struct ice_sw_recipe *rm;
5457 bool match_tun = false;
5461 return ICE_ERR_PARAM;
5463 lkup_exts = (struct ice_prot_lkup_ext *)
5464 ice_malloc(hw, sizeof(*lkup_exts))
5466 return ICE_ERR_NO_MEMORY;
5468 /* Determine the number of words to be matched and if it exceeds a
5469 * recipe's restrictions
5471 for (i = 0; i < lkups_cnt; i++) {
5474 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5475 status = ICE_ERR_CFG;
5476 goto err_free_lkup_exts;
/* Extract valid (mask != 0) words for each lookup element. */
5479 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5481 status = ICE_ERR_CFG;
5482 goto err_free_lkup_exts;
5486 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5488 status = ICE_ERR_NO_MEMORY;
5489 goto err_free_lkup_exts;
5492 /* Get field vectors that contain fields extracted from all the protocol
5493 * headers being programmed.
5495 INIT_LIST_HEAD(&rm->fv_list);
5496 INIT_LIST_HEAD(&rm->rg_list);
5498 /* Get bitmap of field vectors (profiles) that are compatible with the
5499 * rule request; only these will be searched in the subsequent call to
5502 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5504 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5508 /* Group match words into recipes using preferred recipe grouping
5511 status = ice_create_recipe_group(hw, rm, lkup_exts);
5515 /* There is only profile for UDP tunnels. So, it is necessary to use a
5516 * metadata ID flag to differentiate different tunnel types. A separate
5517 * recipe needs to be used for the metadata.
5519 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5520 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5521 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5524 /* set the recipe priority if specified */
5525 rm->priority = rinfo->priority ? rinfo->priority : 0;
5527 /* Find offsets from the field vector. Pick the first one for all the
5530 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5534 /* get bitmap of all profiles the recipe will be associated with */
5535 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5536 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5538 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5539 ice_set_bit((u16)fvit->profile_id, profiles);
5542 /* Create any special protocol/offset pairs, such as looking at tunnel
5543 * bits by extracting metadata
5545 status = ice_add_special_words(rinfo, lkup_exts);
5547 goto err_free_lkup_exts;
5549 /* Look for a recipe which matches our requested fv / mask list */
5550 *rid = ice_find_recp(hw, lkup_exts);
5551 if (*rid < ICE_MAX_NUM_RECIPES)
5552 /* Success if found a recipe that match the existing criteria */
5555 /* Recipe we need does not exist, add a recipe */
5556 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5560 /* Associate all the recipes created with all the profiles in the
5561 * common field vector.
5563 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5565 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge the new recipe bits into the profile's existing recipe map
 * read back from FW, then write the union back.
 */
5568 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5569 (u8 *)r_bitmap, NULL);
5573 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5574 ICE_MAX_NUM_RECIPES);
5575 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5579 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5582 ice_release_change_lock(hw);
5587 /* Update profile to recipe bitmap array */
5588 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5589 ICE_MAX_NUM_RECIPES);
5591 /* Update recipe to profile bitmap array */
5592 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5593 if (ice_is_bit_set(r_bitmap, j))
5594 ice_set_bit((u16)fvit->profile_id,
5595 recipe_to_profile[j]);
5598 *rid = rm->root_rid;
5599 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5600 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Common cleanup: free the recipe-group list, fv list, root buffer
 * and the lookup-extension scratch structure.
 */
5602 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5603 ice_recp_grp_entry, l_entry) {
5604 LIST_DEL(&r_entry->l_entry);
5605 ice_free(hw, r_entry);
5608 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5610 LIST_DEL(&fvit->list_entry);
5615 ice_free(hw, rm->root_buf);
5620 ice_free(hw, lkup_exts);
5626 * ice_find_dummy_packet - find dummy packet by tunnel type
5628 * @lkups: lookup elements or match criteria for the advanced recipe, one
5629 * structure per protocol header
5630 * @lkups_cnt: number of protocols
5631 * @tun_type: tunnel type from the match criteria
5632 * @pkt: dummy packet to fill according to filter match criteria
5633 * @pkt_len: packet length of dummy packet
5634 * @offsets: pointer to receive the pointer to the offsets for the packet
/* Selects a prebuilt dummy packet template (and its per-protocol offset
 * table) based on tunnel type and on which L3/L4 protocols appear in the
 * lookup list. NOTE(review): the early `return` statements after the GTP
 * and PPPoE branches fall in line-number gaps and are not visible here.
 */
5637 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5638 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5640 const struct ice_dummy_pkt_offsets **offsets)
5642 bool tcp = false, udp = false, ipv6 = false;
5645 if (tun_type == ICE_SW_TUN_GTP) {
5646 *pkt = dummy_udp_gtp_packet;
5647 *pkt_len = sizeof(dummy_udp_gtp_packet);
5648 *offsets = dummy_udp_gtp_packet_offsets;
5651 if (tun_type == ICE_SW_TUN_PPPOE) {
5652 *pkt = dummy_pppoe_packet;
5653 *pkt_len = sizeof(dummy_pppoe_packet);
5654 *offsets = dummy_pppoe_packet_offsets;
/* Scan the lookups to learn which inner protocols the rule matches. */
5657 for (i = 0; i < lkups_cnt; i++) {
5658 if (lkups[i].type == ICE_UDP_ILOS)
5660 else if (lkups[i].type == ICE_TCP_IL)
5662 else if (lkups[i].type == ICE_IPV6_OFOS)
5666 if (tun_type == ICE_ALL_TUNNELS) {
5667 *pkt = dummy_gre_udp_packet;
5668 *pkt_len = sizeof(dummy_gre_udp_packet);
5669 *offsets = dummy_gre_udp_packet_offsets;
5673 if (tun_type == ICE_SW_TUN_NVGRE) {
/* (Condition line selecting TCP variant not visible in this listing.) */
5675 *pkt = dummy_gre_tcp_packet;
5676 *pkt_len = sizeof(dummy_gre_tcp_packet);
5677 *offsets = dummy_gre_tcp_packet_offsets;
5681 *pkt = dummy_gre_udp_packet;
5682 *pkt_len = sizeof(dummy_gre_udp_packet);
5683 *offsets = dummy_gre_udp_packet_offsets;
5687 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5688 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5690 *pkt = dummy_udp_tun_tcp_packet;
5691 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5692 *offsets = dummy_udp_tun_tcp_packet_offsets;
5696 *pkt = dummy_udp_tun_udp_packet;
5697 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5698 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled: pick the template by inner protocol flags. */
5703 *pkt = dummy_udp_packet;
5704 *pkt_len = sizeof(dummy_udp_packet);
5705 *offsets = dummy_udp_packet_offsets;
5707 } else if (udp && ipv6) {
5708 *pkt = dummy_udp_ipv6_packet;
5709 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5710 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" is logically just "ipv6";
 * presumably intentional redundancy for readability — confirm upstream.
 */
5712 } else if ((tcp && ipv6) || ipv6) {
5713 *pkt = dummy_tcp_ipv6_packet;
5714 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5715 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default fallback: plain IPv4/TCP dummy packet. */
5719 *pkt = dummy_tcp_packet;
5720 *pkt_len = sizeof(dummy_tcp_packet);
5721 *offsets = dummy_tcp_packet_offsets;
5725 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5727 * @lkups: lookup elements or match criteria for the advanced recipe, one
5728 * structure per protocol header
5729 * @lkups_cnt: number of protocols
5730 * @s_rule: stores rule information from the match criteria
5731 * @dummy_pkt: dummy packet to fill according to filter match criteria
5732 * @pkt_len: packet length of dummy packet
5733 * @offsets: offset info for the dummy packet
/* Copies the chosen dummy packet into the switch rule buffer and overlays the
 * caller's match values, writing only the bits selected by each lookup mask.
 * Returns ICE_ERR_PARAM if a lookup type has no offset entry or is unknown.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, case labels, loop bodies); visible code kept verbatim.
 */
5735 static enum ice_status
5736 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5737 struct ice_aqc_sw_rules_elem *s_rule,
5738 const u8 *dummy_pkt, u16 pkt_len,
5739 const struct ice_dummy_pkt_offsets *offsets)
5744 /* Start with a packet with a pre-defined/dummy content. Then, fill
5745 * in the header values to be looked up or matched.
5747 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5749 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5751 for (i = 0; i < lkups_cnt; i++) {
5752 enum ice_protocol_type type;
5753 u16 offset = 0, len = 0, j;
5756 /* find the start of this layer; it should be found since this
5757 * was already checked when searching for the dummy packet
5759 type = lkups[i].type;
5760 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5761 if (type == offsets[j].type) {
5762 offset = offsets[j].offset;
/* No matching offset entry found for this lookup type. */
5767 /* this should never happen in a correct calling sequence */
5769 return ICE_ERR_PARAM;
/* Per-protocol header length; the case labels fall on dropped lines. */
5771 switch (lkups[i].type) {
5774 len = sizeof(struct ice_ether_hdr);
5777 len = sizeof(struct ice_ethtype_hdr);
5780 len = sizeof(struct ice_vlan_hdr);
5784 len = sizeof(struct ice_ipv4_hdr);
5788 /* Based on the same mechanism below, if tc (Traffic
5789 * Class) for IPv6 has mask, it means tc field is set.
5790 * Since tc is only one byte, we have to handle the
5791 * big/little endian issue before it can be inserted.
5793 if (lkups[i].m_u.ipv6_hdr.tc) {
5794 ((u16 *)&lkups[i].h_u)[0] =
5795 (((u16 *)&lkups[i].h_u)[0] << 8) |
5796 (((u16 *)&lkups[i].h_u)[0] >> 8);
5797 ((u16 *)&lkups[i].m_u)[0] =
5798 (((u16 *)&lkups[i].m_u)[0] << 8) |
5799 (((u16 *)&lkups[i].m_u)[0] >> 8);
5801 len = sizeof(struct ice_ipv6_hdr);
5806 len = sizeof(struct ice_l4_hdr);
5809 len = sizeof(struct ice_sctp_hdr);
5812 len = sizeof(struct ice_nvgre);
5817 len = sizeof(struct ice_udp_tnl_hdr);
5821 len = sizeof(struct ice_udp_gtp_hdr);
5824 len = sizeof(struct ice_pppoe_hdr);
/* Unknown lookup type (default case, label on a dropped line). */
5827 return ICE_ERR_PARAM;
5830 /* the length should be a word multiple */
5831 if (len % ICE_BYTES_PER_WORD)
5834 /* We have the offset to the header start, the length, the
5835 * caller's header values and mask. Use this information to
5836 * copy the data into the dummy packet appropriately based on
5837 * the mask. Note that we need to only write the bits as
5838 * indicated by the mask to make sure we don't improperly write
5839 * over any significant packet data.
5841 for (j = 0; j < len / sizeof(u16); j++)
5842 if (((u16 *)&lkups[i].m_u)[j])
5843 ((u16 *)(pkt + offset))[j] =
5844 (((u16 *)(pkt + offset))[j] &
5845 ~((u16 *)&lkups[i].m_u)[j]) |
5846 (((u16 *)&lkups[i].h_u)[j] &
5847 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the rule (little-endian). */
5850 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5856 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5857 * @hw: pointer to the hardware structure
5858 * @tun_type: tunnel type
5859 * @pkt: dummy packet to fill in
5860 * @offsets: offset info for the dummy packet
/* Patches the open UDP tunnel port (VXLAN or GENEVE) into the outer UDP
 * destination-port field of an already-built dummy packet.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (switch header, breaks, error returns); visible code kept verbatim.
 */
5862 static enum ice_status
5863 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5864 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* VXLAN-family tunnel types query the open VXLAN port; GENEVE queries
 * the GENEVE port. The failure branches fall on dropped lines.
 */
5869 case ICE_SW_TUN_AND_NON_TUN:
5870 case ICE_SW_TUN_VXLAN_GPE:
5871 case ICE_SW_TUN_VXLAN:
5872 case ICE_SW_TUN_UDP:
5873 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5877 case ICE_SW_TUN_GENEVE:
5878 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5883 /* Nothing needs to be done for this tunnel type */
5887 /* Find the outer UDP protocol header and insert the port number */
5888 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5889 if (offsets[i].type == ICE_UDP_OF) {
5890 struct ice_l4_hdr *hdr;
5893 offset = offsets[i].offset;
5894 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* 16-bit byte swap of open_port; presumably converts the
 * host-order port to network (big-endian) order on a
 * little-endian host — TODO confirm endianness assumption.
 */
5895 hdr->dst_port = open_port << 8 | open_port >> 8;
5905 * ice_find_adv_rule_entry - Search a rule entry
5906 * @hw: pointer to the hardware structure
5907 * @lkups: lookup elements or match criteria for the advanced recipe, one
5908 * structure per protocol header
5909 * @lkups_cnt: number of protocols
5910 * @recp_id: recipe ID for which we are finding the rule
5911 * @rinfo: other information regarding the rule e.g. priority and action info
5913 * Helper function to search for a given advance rule entry
5914 * Returns pointer to entry storing the rule if found
/* Walks the filter list of recipe recp_id looking for an entry whose lookups
 * (memcmp-equal, same count) and rule info (sw_act.flag, tun_type, and the
 * comparison continued on dropped line 5939) match the caller's.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (the return statements among them); visible code kept verbatim.
 */
5916 static struct ice_adv_fltr_mgmt_list_entry *
5917 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5918 u16 lkups_cnt, u8 recp_id,
5919 struct ice_adv_rule_info *rinfo)
5921 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5922 struct ice_switch_info *sw = hw->switch_info;
5925 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5926 ice_adv_fltr_mgmt_list_entry, list_entry) {
5927 bool lkups_matched = true;
/* Lookup counts must match before element-wise comparison. */
5929 if (lkups_cnt != list_itr->lkups_cnt)
5931 for (i = 0; i < list_itr->lkups_cnt; i++)
5932 if (memcmp(&list_itr->lkups[i], &lkups[i],
5934 lkups_matched = false;
5937 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5938 rinfo->tun_type == list_itr->rule_info.tun_type &&
5946 * ice_adv_add_update_vsi_list
5947 * @hw: pointer to the hardware structure
5948 * @m_entry: pointer to current adv filter management list entry
5949 * @cur_fltr: filter information from the book keeping entry
5950 * @new_fltr: filter information with the new VSI to be added
5952 * Call AQ command to add or update previously created VSI list with new VSI.
5954 * Helper function to do book keeping associated with adding filter information
5955 * The algorithm to do the bookkeeping is described below:
5956 * When a VSI needs to subscribe to a given advanced filter
5957 * if only one VSI has been added till now
5958 * Allocate a new VSI list and add two VSIs
5959 * to this list using switch rule command
5960 * Update the previously created switch rule with the
5961 * newly created VSI list ID
5962 * if a VSI list was previously created
5963 * Add the new VSI to the previously created VSI list set
5964 * using the update switch rule command
/* Adds a VSI to an advanced rule's forwarding set: either promotes a single
 * "forward to VSI" rule into a two-entry VSI list, or appends the new VSI to
 * the rule's existing VSI list. On success increments m_entry->vsi_count.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, early returns, some arguments); visible code kept verbatim.
 */
5966 static enum ice_status
5967 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5968 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5969 struct ice_adv_rule_info *cur_fltr,
5970 struct ice_adv_rule_info *new_fltr)
5972 enum ice_status status;
5973 u16 vsi_list_id = 0;
/* Queue-directed existing rules cannot be extended with a VSI list. */
5975 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5976 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5977 return ICE_ERR_NOT_IMPL;
5979 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5980 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5981 return ICE_ERR_ALREADY_EXISTS;
/* Mixing queue-directed new actions with VSI-directed existing ones is
 * not supported.
 */
5983 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5984 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5985 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5986 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5987 return ICE_ERR_NOT_IMPL;
5989 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5990 /* Only one entry existed in the mapping and it was not already
5991 * a part of a VSI list. So, create a VSI list with the old and
5994 struct ice_fltr_info tmp_fltr;
5995 u16 vsi_handle_arr[2];
5997 /* A rule already exists with the new VSI being added */
5998 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5999 new_fltr->sw_act.fwd_id.hw_vsi_id)
6000 return ICE_ERR_ALREADY_EXISTS;
6002 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6003 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6004 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6010 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6011 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6012 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6013 /* Update the previous switch rule of "forward to VSI" to
6016 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Track the new list in the book-keeping entry. */
6020 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6021 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6022 m_entry->vsi_list_info =
6023 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
/* else-branch: the rule already uses a VSI list; append the new VSI. */
6026 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6028 if (!m_entry->vsi_list_info)
6031 /* A rule already exists with the new VSI being added */
6032 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6035 /* Update the previously created VSI list set with
6036 * the new VSI ID passed in
6038 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6040 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6042 ice_aqc_opc_update_sw_rules,
6044 /* update VSI list mapping info with new VSI ID */
6046 ice_set_bit(vsi_handle,
6047 m_entry->vsi_list_info->vsi_map);
6050 m_entry->vsi_count++;
6055 * ice_add_adv_rule - helper function to create an advanced switch rule
6056 * @hw: pointer to the hardware structure
6057 * @lkups: information on the words that needs to be looked up. All words
6058 * together makes one recipe
6059 * @lkups_cnt: num of entries in the lkups array
6060 * @rinfo: other information related to the rule that needs to be programmed
6061 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6062 * ignored in case of error.
6064 * This function can program only 1 rule at a time. The lkups is used to
6065 * describe the all the words that forms the "lookup" portion of the recipe.
6066 * These words can span multiple protocols. Callers to this function need to
6067 * pass in a list of protocol headers with lookup information along and mask
6068 * that determines which words are valid from the given protocol header.
6069 * rinfo describes other information related to this rule such as forwarding
6070 * IDs, priority of this rule, etc.
/* Programs one advanced switch rule: validates the lookups and action,
 * creates/locates the recipe, builds the rule buffer with a dummy packet,
 * sends it via the admin queue, and records it in the book-keeping list.
 * On error, jumps to err_ice_add_adv_rule which frees partial allocations.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, case labels, status checks); visible code kept verbatim.
 */
6073 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6074 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6075 struct ice_rule_query_data *added_entry)
6077 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6078 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6079 const struct ice_dummy_pkt_offsets *pkt_offsets;
6080 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6081 struct LIST_HEAD_TYPE *rule_head;
6082 struct ice_switch_info *sw;
6083 enum ice_status status;
6084 const u8 *pkt = NULL;
6089 /* Initialize profile to result index bitmap */
6090 if (!hw->switch_info->prof_res_bm_init) {
6091 hw->switch_info->prof_res_bm_init = 1;
6092 ice_init_prof_result_bm(hw);
/* Parameter validation (the guarding condition is on a dropped line). */
6096 return ICE_ERR_PARAM;
6098 /* get # of words we need to match */
6100 for (i = 0; i < lkups_cnt; i++) {
6103 ptr = (u16 *)&lkups[i].m_u;
6104 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Reject rules with zero or too many match words. */
6108 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6109 return ICE_ERR_PARAM;
6111 /* make sure that we can locate a dummy packet */
6112 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6115 status = ICE_ERR_PARAM;
6116 goto err_ice_add_adv_rule;
/* Only these four filter actions are supported for advanced rules. */
6119 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6120 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6121 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6122 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6125 vsi_handle = rinfo->sw_act.vsi_handle;
6126 if (!ice_is_vsi_valid(hw, vsi_handle))
6127 return ICE_ERR_PARAM;
/* Resolve driver VSI handle to the hardware VSI number. */
6129 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6130 rinfo->sw_act.fwd_id.hw_vsi_id =
6131 ice_get_hw_vsi_num(hw, vsi_handle);
6132 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6133 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6135 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If the identical rule already exists, just update its VSI list. */
6138 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6140 /* we have to add VSI to VSI_LIST and increment vsi_count.
6141 * Also Update VSI list so that we can change forwarding rule
6142 * if the rule already exists, we will check if it exists with
6143 * same vsi_id, if not then add it to the VSI list if it already
6144 * exists if not then create a VSI list and add the existing VSI
6145 * ID and the new VSI ID to the list
6146 * We will add that VSI to the list
6148 status = ice_adv_add_update_vsi_list(hw, m_entry,
6149 &m_entry->rule_info,
6152 added_entry->rid = rid;
6153 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6154 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Rule buffer: header-less base size plus the dummy packet length. */
6158 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6159 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6161 return ICE_ERR_NO_MEMORY;
6162 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the filter action into the rule's act word. */
6163 switch (rinfo->sw_act.fltr_act) {
6164 case ICE_FWD_TO_VSI:
6165 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6166 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6167 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6170 act |= ICE_SINGLE_ACT_TO_Q;
6171 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6172 ICE_SINGLE_ACT_Q_INDEX_M;
6174 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size. */
6175 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6176 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6177 act |= ICE_SINGLE_ACT_TO_Q;
6178 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6179 ICE_SINGLE_ACT_Q_INDEX_M;
6180 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6181 ICE_SINGLE_ACT_Q_REGION_M;
6183 case ICE_DROP_PACKET:
6184 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6185 ICE_SINGLE_ACT_VALID_BIT;
/* default: unsupported action (case label on a dropped line). */
6188 status = ICE_ERR_CFG;
6189 goto err_ice_add_adv_rule;
6192 /* set the rule LOOKUP type based on caller specified 'RX'
6193 * instead of hardcoding it to be either LOOKUP_TX/RX
6195 * for 'RX' set the source to be the port number
6196 * for 'TX' set the source to be the source HW VSI number (determined
6200 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6201 s_rule->pdata.lkup_tx_rx.src =
6202 CPU_TO_LE16(hw->port_info->lport);
6204 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6205 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6208 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6209 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Build the packet image the hardware will match against. */
6211 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6212 pkt_len, pkt_offsets);
6214 goto err_ice_add_adv_rule;
6216 if (rinfo->tun_type != ICE_NON_TUN) {
6217 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6218 s_rule->pdata.lkup_tx_rx.hdr,
6221 goto err_ice_add_adv_rule;
/* Send the completed rule to firmware over the admin queue. */
6224 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6225 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6228 goto err_ice_add_adv_rule;
6229 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6230 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6232 status = ICE_ERR_NO_MEMORY;
6233 goto err_ice_add_adv_rule;
/* Keep a private copy of the lookups for later match/removal. */
6236 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6237 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6238 ICE_NONDMA_TO_NONDMA);
6239 if (!adv_fltr->lkups) {
6240 status = ICE_ERR_NO_MEMORY;
6241 goto err_ice_add_adv_rule;
6244 adv_fltr->lkups_cnt = lkups_cnt;
6245 adv_fltr->rule_info = *rinfo;
6246 adv_fltr->rule_info.fltr_rule_id =
6247 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6248 sw = hw->switch_info;
6249 sw->recp_list[rid].adv_rule = true;
6250 rule_head = &sw->recp_list[rid].filt_rules;
6252 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6253 struct ice_fltr_info tmp_fltr;
6255 tmp_fltr.fltr_rule_id =
6256 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6257 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6258 tmp_fltr.fwd_id.hw_vsi_id =
6259 ice_get_hw_vsi_num(hw, vsi_handle);
6260 tmp_fltr.vsi_handle = vsi_handle;
6261 /* Update the previous switch rule of "forward to VSI" to
6264 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6266 goto err_ice_add_adv_rule;
6267 adv_fltr->vsi_count = 1;
6270 /* Add rule entry to book keeping list */
6271 LIST_ADD(&adv_fltr->list_entry, rule_head);
6273 added_entry->rid = rid;
6274 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6275 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error path: release partially-built book-keeping entry and rule buf. */
6277 err_ice_add_adv_rule:
6278 if (status && adv_fltr) {
6279 ice_free(hw, adv_fltr->lkups);
6280 ice_free(hw, adv_fltr);
6283 ice_free(hw, s_rule);
6289 * ice_adv_rem_update_vsi_list
6290 * @hw: pointer to the hardware structure
6291 * @vsi_handle: VSI handle of the VSI to remove
6292 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes one VSI from an advanced rule's VSI list; when only one VSI is
 * left afterwards, the rule is demoted back to a plain "forward to VSI" and
 * the now-unused VSI list is deleted and its map entry freed.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, status checks, some arguments); visible code kept verbatim.
 */
6295 static enum ice_status
6296 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6297 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6299 struct ice_vsi_list_map_info *vsi_list_info;
6300 enum ice_sw_lkup_type lkup_type;
6301 enum ice_status status;
/* Only list-based rules with at least one VSI can be shrunk. */
6304 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6305 fm_list->vsi_count == 0)
6306 return ICE_ERR_PARAM;
6308 /* A rule with the VSI being removed does not exist */
6309 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6310 return ICE_ERR_DOES_NOT_EXIST;
6312 lkup_type = ICE_SW_LKUP_LAST;
6313 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* 'true' = remove the VSI from the list rather than add it. */
6314 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6315 ice_aqc_opc_update_sw_rules,
6320 fm_list->vsi_count--;
6321 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6322 vsi_list_info = fm_list->vsi_list_info;
6323 if (fm_list->vsi_count == 1) {
6324 struct ice_fltr_info tmp_fltr;
/* Identify the single remaining VSI in the list. */
6327 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6329 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6330 return ICE_ERR_OUT_OF_RANGE;
6332 /* Make sure VSI list is empty before removing it below */
6333 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6335 ice_aqc_opc_update_sw_rules,
6339 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6340 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6341 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6342 tmp_fltr.fwd_id.hw_vsi_id =
6343 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6344 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6345 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6347 /* Convert the rule from "forward to VSI list" back to a plain
6348 * "forward to VSI" targeting the last remaining VSI
6350 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6352 ice_debug(hw, ICE_DBG_SW,
6353 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6354 tmp_fltr.fwd_id.hw_vsi_id, status);
6358 /* Remove the VSI list since it is no longer used */
6359 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6361 ice_debug(hw, ICE_DBG_SW,
6362 "Failed to remove VSI list %d, error %d\n",
6363 vsi_list_id, status);
/* Drop the book-keeping map for the deleted list. */
6367 LIST_DEL(&vsi_list_info->list_entry);
6368 ice_free(hw, vsi_list_info);
6369 fm_list->vsi_list_info = NULL;
6376 * ice_rem_adv_rule - removes existing advanced switch rule
6377 * @hw: pointer to the hardware structure
6378 * @lkups: information on the words that needs to be looked up. All words
6379 * together makes one recipe
6380 * @lkups_cnt: num of entries in the lkups array
6381 * @rinfo: pointer to the rule information for the rule
6383 * This function can be used to remove 1 rule at a time. The lkups is
6384 * used to describe all the words that forms the "lookup" portion of the
6385 * rule. These words can span multiple protocols. Callers to this function
6386 * need to pass in a list of protocol headers with lookup information along
6387 * and mask that determines which words are valid from the given protocol
6388 * header. rinfo describes other information related to this rule such as
6389 * forwarding IDs, priority of this rule, etc.
/* Removes one advanced rule: rebuilds the lookup-extraction words to locate
 * the recipe, finds the matching book-keeping entry, updates/shrinks its VSI
 * list as needed, and — when the rule no longer serves any VSI — deletes the
 * switch rule from firmware and frees the entry.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, early returns, some conditions); visible code kept verbatim.
 */
6392 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6393 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6395 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6396 struct ice_prot_lkup_ext lkup_exts;
6397 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6398 enum ice_status status = ICE_SUCCESS;
6399 bool remove_rule = false;
6400 u16 i, rid, vsi_handle;
6402 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
/* Rebuild the word-extraction description from the caller's lookups. */
6403 for (i = 0; i < lkups_cnt; i++) {
6406 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6409 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6414 /* Create any special protocol/offset pairs, such as looking at tunnel
6415 * bits by extracting metadata
6417 status = ice_add_special_words(rinfo, &lkup_exts);
6421 rid = ice_find_recp(hw, &lkup_exts);
6422 /* If did not find a recipe that match the existing criteria */
6423 if (rid == ICE_MAX_NUM_RECIPES)
6424 return ICE_ERR_PARAM;
6426 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6427 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6428 /* the rule is already removed */
/* Decide whether the whole switch rule must go, under the list lock. */
6431 ice_acquire_lock(rule_lock);
6432 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6434 } else if (list_elem->vsi_count > 1) {
/* Other VSIs still use the list: only detach this one. */
6435 list_elem->vsi_list_info->ref_cnt--;
6436 remove_rule = false;
6437 vsi_handle = rinfo->sw_act.vsi_handle;
6438 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6440 vsi_handle = rinfo->sw_act.vsi_handle;
6441 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6443 ice_release_lock(rule_lock);
6446 if (list_elem->vsi_count == 0)
6449 ice_release_lock(rule_lock);
/* remove_rule path: issue a remove command for this rule index. */
6451 struct ice_aqc_sw_rules_elem *s_rule;
6454 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6456 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6459 return ICE_ERR_NO_MEMORY;
6460 s_rule->pdata.lkup_tx_rx.act = 0;
6461 s_rule->pdata.lkup_tx_rx.index =
6462 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6463 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6464 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6466 ice_aqc_opc_remove_sw_rules, NULL);
/* Only drop the book-keeping entry after firmware confirms removal. */
6467 if (status == ICE_SUCCESS) {
6468 ice_acquire_lock(rule_lock);
6469 LIST_DEL(&list_elem->list_entry);
6470 ice_free(hw, list_elem->lkups);
6471 ice_free(hw, list_elem);
6472 ice_release_lock(rule_lock);
6474 ice_free(hw, s_rule);
6480 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6481 * @hw: pointer to the hardware structure
6482 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6484 * This function is used to remove 1 rule at a time. The removal is based on
6485 * the remove_entry parameter. This function will remove rule for a given
6486 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* Locates an advanced rule by (rid, rule_id) in the recipe's filter list and
 * removes it for the VSI named in remove_entry; returns ICE_ERR_PARAM when
 * the recipe was never created or no rule with that ID is found.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, the macro's trailing argument); visible code kept verbatim.
 */
6489 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6490 struct ice_rule_query_data *remove_entry)
6492 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6493 struct LIST_HEAD_TYPE *list_head;
6494 struct ice_adv_rule_info rinfo;
6495 struct ice_switch_info *sw;
6497 sw = hw->switch_info;
6498 if (!sw->recp_list[remove_entry->rid].recp_created)
6499 return ICE_ERR_PARAM;
6500 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6501 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6503 if (list_itr->rule_info.fltr_rule_id ==
6504 remove_entry->rule_id) {
/* Use the stored rule info, retargeted at the caller's VSI. */
6505 rinfo = list_itr->rule_info;
6506 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6507 return ice_rem_adv_rule(hw, list_itr->lkups,
6508 list_itr->lkups_cnt, &rinfo);
/* No rule with the requested ID exists under this recipe. */
6511 return ICE_ERR_PARAM;
6515 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6517 * @hw: pointer to the hardware structure
6518 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6520 * This function is used to remove all the rules for a given VSI and as soon
6521 * as removing a rule fails, it will return immediately with the error code,
6522 * else it will return ICE_SUCCESS
/* Iterates every created recipe that holds advanced rules and removes, for
 * the given VSI, each rule the VSI participates in (per the VSI-list map).
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (continue statements, status checks); visible code kept verbatim.
 */
6525 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6527 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6528 struct ice_vsi_list_map_info *map_info;
6529 struct LIST_HEAD_TYPE *list_head;
6530 struct ice_adv_rule_info rinfo;
6531 struct ice_switch_info *sw;
6532 enum ice_status status;
6533 u16 vsi_list_id = 0;
6536 sw = hw->switch_info;
/* Skip recipes that were never created or hold no advanced rules. */
6537 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6538 if (!sw->recp_list[rid].recp_created)
6540 if (!sw->recp_list[rid].adv_rule)
6542 list_head = &sw->recp_list[rid].filt_rules;
6544 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6545 ice_adv_fltr_mgmt_list_entry, list_entry) {
6546 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
/* Remove the rule on behalf of this VSI. */
6550 rinfo = list_itr->rule_info;
6551 rinfo.sw_act.vsi_handle = vsi_handle;
6552 status = ice_rem_adv_rule(hw, list_itr->lkups,
6553 list_itr->lkups_cnt, &rinfo);
6563 * ice_replay_fltr - Replay all the filters stored by a specific list head
6564 * @hw: pointer to the hardware structure
6565 * @list_head: list for which filters needs to be replayed
6566 * @recp_id: Recipe ID for which rules need to be replayed
/* Re-adds every filter recorded under list_head for recipe recp_id by moving
 * the entries to a temporary list (so re-adding does not collide with the
 * live book-keeping) and replaying them one by one; VSI-list entries are
 * replayed per-VSI. The temporary list is freed at the end.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, returns, the inner while loop head); code kept verbatim.
 */
6568 static enum ice_status
6569 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6571 struct ice_fltr_mgmt_list_entry *itr;
6572 struct LIST_HEAD_TYPE l_head;
6573 enum ice_status status = ICE_SUCCESS;
6575 if (LIST_EMPTY(list_head))
6578 /* Move entries from the given list_head to a temporary l_head so that
6579 * they can be replayed. Otherwise when trying to re-add the same
6580 * filter, the function will return already exists
6582 LIST_REPLACE_INIT(list_head, &l_head)
6584 /* Mark the given list_head empty by reinitializing it so filters
6585 * could be added again by *handler
6587 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6589 struct ice_fltr_list_entry f_entry;
6591 f_entry.fltr_info = itr->fltr_info;
/* Simple (non-VLAN, single-VSI) filters replay directly. */
6592 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6593 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6594 if (status != ICE_SUCCESS)
6599 /* Add a filter per VSI separately */
6604 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6606 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit so the per-VSI loop terminates. */
6609 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6610 f_entry.fltr_info.vsi_handle = vsi_handle;
6611 f_entry.fltr_info.fwd_id.hw_vsi_id =
6612 ice_get_hw_vsi_num(hw, vsi_handle);
6613 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6614 if (recp_id == ICE_SW_LKUP_VLAN)
6615 status = ice_add_vlan_internal(hw, &f_entry);
6617 status = ice_add_rule_internal(hw, recp_id,
6619 if (status != ICE_SUCCESS)
6624 /* Clear the filter management list */
6625 ice_rem_sw_rule_info(hw, &l_head);
6630 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6631 * @hw: pointer to the hardware structure
6633 * NOTE: This function does not clean up partially added filters on error.
6634 * It is up to caller of the function to issue a reset or fail early.
/* Replays the filters of every recipe's filt_rules list, stopping at the
 * first failure (per the header comment, partial state is not cleaned up).
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, the final return); visible code kept verbatim.
 */
6636 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6638 struct ice_switch_info *sw = hw->switch_info;
6639 enum ice_status status = ICE_SUCCESS;
6642 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6643 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6645 status = ice_replay_fltr(hw, i, head);
6646 if (status != ICE_SUCCESS)
6653 * ice_replay_vsi_fltr - Replay filters for requested VSI
6654 * @hw: pointer to the hardware structure
6655 * @vsi_handle: driver VSI handle
6656 * @recp_id: Recipe ID for which rules need to be replayed
6657 * @list_head: list for which filters need to be replayed
6659 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6660 * It is required to pass valid VSI handle.
/* Replays the filters in list_head that involve the given VSI, for recipe
 * recp_id: single-VSI non-VLAN entries are re-added directly; VSI-list
 * entries are re-added per-VSI after clearing the VSI's bit in the map.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, continue statements, returns); visible code kept verbatim.
 */
6662 static enum ice_status
6663 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6664 struct LIST_HEAD_TYPE *list_head)
6666 struct ice_fltr_mgmt_list_entry *itr;
6667 enum ice_status status = ICE_SUCCESS;
6670 if (LIST_EMPTY(list_head))
6672 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6674 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6676 struct ice_fltr_list_entry f_entry;
6678 f_entry.fltr_info = itr->fltr_info;
/* Direct replay: a single-VSI, non-VLAN filter owned by this VSI. */
6679 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6680 itr->fltr_info.vsi_handle == vsi_handle) {
6681 /* update the src in case it is VSI num */
6682 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6683 f_entry.fltr_info.src = hw_vsi_id;
6684 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6685 if (status != ICE_SUCCESS)
/* Otherwise this VSI must appear in the entry's VSI-list map. */
6689 if (!itr->vsi_list_info ||
6690 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6692 /* Clearing it so that the logic can add it back */
6693 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6694 f_entry.fltr_info.vsi_handle = vsi_handle;
6695 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6696 /* update the src in case it is VSI num */
6697 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6698 f_entry.fltr_info.src = hw_vsi_id;
6699 if (recp_id == ICE_SW_LKUP_VLAN)
6700 status = ice_add_vlan_internal(hw, &f_entry);
6702 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6703 if (status != ICE_SUCCESS)
6711 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6712 * @hw: pointer to the hardware structure
6713 * @vsi_handle: driver VSI handle
6714 * @list_head: list for which filters need to be replayed
6716 * Replay the advanced rule for the given VSI.
/* Replays every advanced rule in list_head whose stored action targets the
 * given VSI, by calling ice_add_adv_rule with the saved lookups/rule info.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, continue, the final return); visible code kept verbatim.
 */
6718 static enum ice_status
6719 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6720 struct LIST_HEAD_TYPE *list_head)
6722 struct ice_rule_query_data added_entry = { 0 };
6723 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6724 enum ice_status status = ICE_SUCCESS;
6726 if (LIST_EMPTY(list_head))
6728 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6730 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6731 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only rules targeting the requested VSI are replayed. */
6733 if (vsi_handle != rinfo->sw_act.vsi_handle)
6735 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6744 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6745 * @hw: pointer to the hardware structure
6746 * @vsi_handle: driver VSI handle
6748 * Replays filters for requested VSI via vsi_handle.
/* Replays all saved filters for one VSI: for each recipe, walks its
 * filt_replay_rules list and dispatches to the plain or advanced replay
 * helper depending on the recipe's adv_rule flag; stops on first failure.
 * NOTE(review): the embedded line numbers skip, so this listing is missing
 * lines (braces, the final return); visible code kept verbatim.
 */
6750 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6752 struct ice_switch_info *sw = hw->switch_info;
6753 enum ice_status status;
6756 /* Update the recipes that were created */
6757 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6758 struct LIST_HEAD_TYPE *head;
6760 head = &sw->recp_list[i].filt_replay_rules;
6761 if (!sw->recp_list[i].adv_rule)
6762 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6764 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6765 if (status != ICE_SUCCESS)
6773 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6774 * @hw: pointer to the HW struct
6776 * Deletes the filter replay rules.
6778 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6780 struct ice_switch_info *sw = hw->switch_info;
6786 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6787 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6788 struct LIST_HEAD_TYPE *l_head;
6790 l_head = &sw->recp_list[i].filt_replay_rules;
6791 if (!sw->recp_list[i].adv_rule)
6792 ice_rem_sw_rule_info(hw, l_head);
6794 ice_rem_adv_rule_info(hw, l_head);