/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Buffer size for a RX/TX lookup rule element that carries the dummy
 * Ethernet header as its packet data.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Buffer size for a RX/TX lookup rule element with no packet header data.
 * NOTE(review): the trailing "- 1" presumably compensates for a one-byte
 * placeholder array inside ice_sw_rule_lkup_rx_tx — confirm against the
 * struct definition.
 */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Buffer size for a large-action rule element carrying n actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Buffer size for a VSI-list rule element carrying n VSI numbers */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00,
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
159 { ICE_VXLAN_GPE, 42 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
212 { ICE_VXLAN_GPE, 42 },
215 { ICE_UDP_ILOS, 84 },
216 { ICE_PROTOCOL_LAST, 0 },
220 u8 dummy_udp_tun_udp_packet[] = {
221 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x08, 0x00, /* ICE_ETYPE_OL 12 */
227 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
228 0x00, 0x01, 0x00, 0x00,
229 0x00, 0x11, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
234 0x00, 0x3a, 0x00, 0x00,
236 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
237 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
251 0x00, 0x08, 0x00, 0x00,
255 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
257 { ICE_ETYPE_OL, 12 },
258 { ICE_IPV4_OFOS, 14 },
259 { ICE_UDP_ILOS, 34 },
260 { ICE_PROTOCOL_LAST, 0 },
264 dummy_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x08, 0x00, /* ICE_ETYPE_OL 12 */
271 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
272 0x00, 0x01, 0x00, 0x00,
273 0x00, 0x11, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
278 0x00, 0x08, 0x00, 0x00,
280 0x00, 0x00, /* 2 bytes for 4 byte alignment */
284 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
286 { ICE_ETYPE_OL, 12 },
287 { ICE_IPV4_OFOS, 14 },
289 { ICE_PROTOCOL_LAST, 0 },
293 dummy_tcp_packet[] = {
294 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
295 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00,
298 0x08, 0x00, /* ICE_ETYPE_OL 12 */
300 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
301 0x00, 0x01, 0x00, 0x00,
302 0x00, 0x06, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x50, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, /* 2 bytes for 4 byte alignment */
316 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
318 { ICE_ETYPE_OL, 12 },
319 { ICE_IPV6_OFOS, 14 },
321 { ICE_PROTOCOL_LAST, 0 },
325 dummy_tcp_ipv6_packet[] = {
326 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
332 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
333 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x50, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, /* 2 bytes for 4 byte alignment */
353 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
355 { ICE_ETYPE_OL, 12 },
356 { ICE_IPV6_OFOS, 14 },
357 { ICE_UDP_ILOS, 54 },
358 { ICE_PROTOCOL_LAST, 0 },
362 dummy_udp_ipv6_packet[] = {
363 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
364 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
367 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
381 0x00, 0x08, 0x00, 0x00,
383 0x00, 0x00, /* 2 bytes for 4 byte alignment */
387 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
389 { ICE_IPV4_OFOS, 14 },
392 { ICE_PROTOCOL_LAST, 0 },
396 dummy_udp_gtp_packet[] = {
397 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
402 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x11, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
409 0x00, 0x1c, 0x00, 0x00,
411 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
412 0x00, 0x00, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x85,
415 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
416 0x00, 0x00, 0x00, 0x00,
420 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_ETYPE_OL, 12 },
423 { ICE_VLAN_OFOS, 14},
425 { ICE_PROTOCOL_LAST, 0 },
429 dummy_pppoe_packet[] = {
430 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
431 0x00, 0x00, 0x00, 0x00,
432 0x00, 0x00, 0x00, 0x00,
434 0x81, 0x00, /* ICE_ETYPE_OL 12 */
436 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
438 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
441 0x00, 0x21, /* PPP Link Layer 24 */
443 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, 0x00, 0x00,
449 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap: indexed by recipe ID,
 * each bitmap holds the profiles that recipe is mapped to
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap: indexed by profile ID,
 * each bitmap holds the recipes associated with that profile
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration — defined further below, used by ice_get_recp_frm_fw() */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
463 * ice_collect_result_idx - copy result index values
464 * @buf: buffer that contains the result index
465 * @recp: the recipe struct to copy data into
467 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
468 struct ice_sw_recipe *recp)
470 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
471 ice_set_bit(buf->content.result_indx &
472 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * NOTE(review): multiple lines appear to have been lost when this file was
 * extracted (the NULL checks after allocations, the error-unroll label and
 * cleanup, several closing braces and `continue`/`goto` statements).
 * Surviving statements are preserved byte-for-byte; restore from upstream
 * before compiling.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u16 i, sub_recps, fv_word_idx = 0;
	enum ice_status status;

	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
	/* NOTE(review): the `if (!tmp)` guard for this return was lost */
		return ICE_ERR_NO_MEMORY;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 prof, idx, prot = 0;

		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
		/* NOTE(review): `if (!rg_entry)` guard lost for this path */
			status = ICE_ERR_NO_MEMORY;

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);

		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;

		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = num_recps;
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)

	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
631 * ice_get_recp_to_prof_map - updates recipe to profile mapping
632 * @hw: pointer to hardware structure
634 * This function is used to populate recipe_to_profile matrix where index to
635 * this array is the recipe ID and the element is the mapping of which profiles
636 * is this recipe mapped to.
639 ice_get_recp_to_prof_map(struct ice_hw *hw)
641 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
644 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
647 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
648 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
649 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
651 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
652 ICE_MAX_NUM_RECIPES);
653 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
654 if (ice_is_bit_set(r_bitmap, j))
655 ice_set_bit(i, recipe_to_profile[j]);
660 * ice_init_def_sw_recp - initialize the recipe book keeping tables
661 * @hw: pointer to the HW struct
663 * Allocate memory for the entire recipe table and initialize the structures/
664 * entries corresponding to basic recipes.
666 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
668 struct ice_sw_recipe *recps;
671 recps = (struct ice_sw_recipe *)
672 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
674 return ICE_ERR_NO_MEMORY;
676 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
677 recps[i].root_rid = i;
678 INIT_LIST_HEAD(&recps[i].filt_rules);
679 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
680 INIT_LIST_HEAD(&recps[i].rg_list);
681 ice_init_lock(&recps[i].filt_rule_lock);
684 hw->switch_info->recp_list = recps;
690 * ice_aq_get_sw_cfg - get switch configuration
691 * @hw: pointer to the hardware structure
692 * @buf: pointer to the result buffer
693 * @buf_size: length of the buffer available for response
694 * @req_desc: pointer to requested descriptor
695 * @num_elems: pointer to number of elements
696 * @cd: pointer to command details structure or NULL
698 * Get switch configuration (0x0200) to be placed in 'buff'.
699 * This admin command returns information such as initial VSI/port number
700 * and switch ID it belongs to.
702 * NOTE: *req_desc is both an input/output parameter.
703 * The caller of this function first calls this function with *request_desc set
704 * to 0. If the response from f/w has *req_desc set to 0, all the switch
705 * configuration information has been returned; if non-zero (meaning not all
706 * the information was returned), the caller should call this function again
707 * with *req_desc set to the previous value returned by f/w to get the
708 * next block of switch configuration information.
710 * *num_elems is output only parameter. This reflects the number of elements
711 * in response buffer. The caller of this function to use *num_elems while
712 * parsing the response buffer.
714 static enum ice_status
715 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
716 u16 buf_size, u16 *req_desc, u16 *num_elems,
717 struct ice_sq_cd *cd)
719 struct ice_aqc_get_sw_cfg *cmd;
720 enum ice_status status;
721 struct ice_aq_desc desc;
723 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
724 cmd = &desc.params.get_sw_conf;
725 cmd->element = CPU_TO_LE16(*req_desc);
727 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
729 *req_desc = LE16_TO_CPU(cmd->element);
730 *num_elems = LE16_TO_CPU(cmd->num_elems);
/**
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * NOTE(review): several lines appear lost in extraction (return type and
 * second signature line, opening brace, `u16 buf_len;`, the allocation NULL
 * checks, the `if (ena_stats) {` branch header, the status checks, the
 * ice_alloc_sw_exit label and final return). Surviving statements are
 * preserved byte-for-byte; restore from upstream before compiling.
 */
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	/* NOTE(review): `if (!sw_buf)` guard lost for this return */
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 * is requested.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			     ICE_AQC_RES_TYPE_FLAG_DEDICATED));

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);

		goto ice_alloc_sw_exit;

	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);

		/* Prepare buffer for VEB Counter */
		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
		struct ice_aqc_alloc_free_res_elem *counter_buf;
		struct ice_aqc_res_elem *counter_ele;

		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
		/* NOTE(review): `if (!counter_buf) {` guard lost here */
			status = ICE_ERR_NO_MEMORY;
			goto ice_alloc_sw_exit;

		/* The number of resource entries in buffer is passed as 1 since
		 * only a single switch/VEB instance is allocated, and hence a
		 * single VEB counter is requested.
		 */
		counter_buf->num_elems = CPU_TO_LE16(1);
		counter_buf->res_type =
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
			ice_free(hw, counter_buf);
			goto ice_alloc_sw_exit;
		counter_ele = &counter_buf->elem[0];
		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
		ice_free(hw, counter_buf);

	ice_free(hw, sw_buf);
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 *
 * NOTE(review): several lines appear lost in extraction (opening brace,
 * `u16 buf_len;`, the allocation NULL checks, the `if (ret_status)` /
 * `if (status)` guards around the debug messages and the final
 * `return ret_status;`). Surviving statements are preserved byte-for-byte.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	/* NOTE(review): `if (!sw_buf)` guard lost for this return */
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 * is released.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);

	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);

		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	/* NOTE(review): `if (!counter_buf) {` guard lost here */
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;

	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 * is released.
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "VEB counter resource could not be freed\n");

	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
889 * @hw: pointer to the HW struct
890 * @vsi_ctx: pointer to a VSI context struct
891 * @cd: pointer to command details structure or NULL
893 * Add a VSI context to the hardware (0x0210)
896 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
897 struct ice_sq_cd *cd)
899 struct ice_aqc_add_update_free_vsi_resp *res;
900 struct ice_aqc_add_get_update_free_vsi *cmd;
901 struct ice_aq_desc desc;
902 enum ice_status status;
904 cmd = &desc.params.vsi_cmd;
905 res = &desc.params.add_update_free_vsi_res;
907 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
909 if (!vsi_ctx->alloc_from_pool)
910 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
911 ICE_AQ_VSI_IS_VALID);
913 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
915 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
917 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
918 sizeof(vsi_ctx->info), cd);
921 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
922 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
923 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
931 * @hw: pointer to the HW struct
932 * @vsi_ctx: pointer to a VSI context struct
933 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
934 * @cd: pointer to command details structure or NULL
936 * Free VSI context info from hardware (0x0213)
939 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
940 bool keep_vsi_alloc, struct ice_sq_cd *cd)
942 struct ice_aqc_add_update_free_vsi_resp *resp;
943 struct ice_aqc_add_get_update_free_vsi *cmd;
944 struct ice_aq_desc desc;
945 enum ice_status status;
947 cmd = &desc.params.vsi_cmd;
948 resp = &desc.params.add_update_free_vsi_res;
950 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
952 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
954 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
956 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
958 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
959 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
967 * @hw: pointer to the HW struct
968 * @vsi_ctx: pointer to a VSI context struct
969 * @cd: pointer to command details structure or NULL
971 * Update VSI context in the hardware (0x0211)
974 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
975 struct ice_sq_cd *cd)
977 struct ice_aqc_add_update_free_vsi_resp *resp;
978 struct ice_aqc_add_get_update_free_vsi *cmd;
979 struct ice_aq_desc desc;
980 enum ice_status status;
982 cmd = &desc.params.vsi_cmd;
983 resp = &desc.params.add_update_free_vsi_res;
985 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
987 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
989 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
991 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
992 sizeof(vsi_ctx->info), cd);
995 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
996 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1003 * ice_is_vsi_valid - check whether the VSI is valid or not
1004 * @hw: pointer to the HW struct
1005 * @vsi_handle: VSI handle
1007 * check whether the VSI is valid or not
1009 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1011 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1015 * ice_get_hw_vsi_num - return the HW VSI number
1016 * @hw: pointer to the HW struct
1017 * @vsi_handle: VSI handle
1019 * return the HW VSI number
1020 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1022 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1024 return hw->vsi_ctx[vsi_handle]->vsi_num;
1028 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1029 * @hw: pointer to the HW struct
1030 * @vsi_handle: VSI handle
1032 * return the VSI context entry for a given VSI handle
1034 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1036 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1040 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1041 * @hw: pointer to the HW struct
1042 * @vsi_handle: VSI handle
1043 * @vsi: VSI context pointer
1045 * save the VSI context entry for a given VSI handle
1048 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1050 hw->vsi_ctx[vsi_handle] = vsi;
1054 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1055 * @hw: pointer to the HW struct
1056 * @vsi_handle: VSI handle
1058 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1060 struct ice_vsi_ctx *vsi;
1063 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1066 ice_for_each_traffic_class(i) {
1067 if (vsi->lan_q_ctx[i]) {
1068 ice_free(hw, vsi->lan_q_ctx[i]);
1069 vsi->lan_q_ctx[i] = NULL;
1075 * ice_clear_vsi_ctx - clear the VSI context entry
1076 * @hw: pointer to the HW struct
1077 * @vsi_handle: VSI handle
1079 * clear the VSI context entry
1081 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1083 struct ice_vsi_ctx *vsi;
1085 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1087 ice_clear_vsi_q_ctx(hw, vsi_handle);
1089 hw->vsi_ctx[vsi_handle] = NULL;
1094 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1095 * @hw: pointer to the HW struct
1097 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1101 for (i = 0; i < ICE_MAX_VSI; i++)
1102 ice_clear_vsi_ctx(hw, i);
1106 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1107 * @hw: pointer to the HW struct
1108 * @vsi_handle: unique VSI handle provided by drivers
1109 * @vsi_ctx: pointer to a VSI context struct
1110 * @cd: pointer to command details structure or NULL
1112 * Add a VSI context to the hardware also add it into the VSI handle list.
1113 * If this function gets called after reset for existing VSIs then update
1114 * with the new HW VSI number in the corresponding VSI handle list entry.
1117 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1118 struct ice_sq_cd *cd)
1120 struct ice_vsi_ctx *tmp_vsi_ctx;
1121 enum ice_status status;
1123 if (vsi_handle >= ICE_MAX_VSI)
1124 return ICE_ERR_PARAM;
1125 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1128 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1130 /* Create a new VSI context */
1131 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1132 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1134 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1135 return ICE_ERR_NO_MEMORY;
1137 *tmp_vsi_ctx = *vsi_ctx;
1139 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1141 /* update with new HW VSI num */
1142 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1149 * ice_free_vsi- free VSI context from hardware and VSI handle list
1150 * @hw: pointer to the HW struct
1151 * @vsi_handle: unique VSI handle
1152 * @vsi_ctx: pointer to a VSI context struct
1153 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1154 * @cd: pointer to command details structure or NULL
1156 * Free VSI context info from hardware as well as from VSI handle list
1159 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1160 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1162 enum ice_status status;
1164 if (!ice_is_vsi_valid(hw, vsi_handle))
1165 return ICE_ERR_PARAM;
1166 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1167 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1169 ice_clear_vsi_ctx(hw, vsi_handle);
1175 * @hw: pointer to the HW struct
1176 * @vsi_handle: unique VSI handle
1177 * @vsi_ctx: pointer to a VSI context struct
1178 * @cd: pointer to command details structure or NULL
1180 * Update VSI context in the hardware
1183 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1184 struct ice_sq_cd *cd)
1186 if (!ice_is_vsi_valid(hw, vsi_handle))
1187 return ICE_ERR_PARAM;
1188 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1189 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1193 * ice_aq_get_vsi_params
1194 * @hw: pointer to the HW struct
1195 * @vsi_ctx: pointer to a VSI context struct
1196 * @cd: pointer to command details structure or NULL
1198 * Get VSI context info from hardware (0x0212)
1201 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1202 struct ice_sq_cd *cd)
1204 struct ice_aqc_add_get_update_free_vsi *cmd;
1205 struct ice_aqc_get_vsi_resp *resp;
1206 struct ice_aq_desc desc;
1207 enum ice_status status;
1209 cmd = &desc.params.vsi_cmd;
1210 resp = &desc.params.get_vsi_resp;
1212 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1214 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1216 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1217 sizeof(vsi_ctx->info), cd);
1219 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1221 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1222 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1229 * ice_aq_add_update_mir_rule - add/update a mirror rule
1230 * @hw: pointer to the HW struct
1231 * @rule_type: Rule Type
1232 * @dest_vsi: VSI number to which packets will be mirrored
1233 * @count: length of the list
1234 * @mr_buf: buffer for list of mirrored VSI numbers
1235 * @cd: pointer to command details structure or NULL
1238 * Add/Update Mirror Rule (0x260).
1241 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1242 u16 count, struct ice_mir_rule_buf *mr_buf,
1243 struct ice_sq_cd *cd, u16 *rule_id)
1245 struct ice_aqc_add_update_mir_rule *cmd;
1246 struct ice_aq_desc desc;
1247 enum ice_status status;
1248 __le16 *mr_list = NULL;
1251 switch (rule_type) {
1252 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1253 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1254 /* Make sure count and mr_buf are set for these rule_types */
1255 if (!(count && mr_buf))
1256 return ICE_ERR_PARAM;
1258 buf_size = count * sizeof(__le16);
1259 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1261 return ICE_ERR_NO_MEMORY;
1263 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1264 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1265 /* Make sure count and mr_buf are not set for these
1268 if (count || mr_buf)
1269 return ICE_ERR_PARAM;
1272 ice_debug(hw, ICE_DBG_SW,
1273 "Error due to unsupported rule_type %u\n", rule_type);
1274 return ICE_ERR_OUT_OF_RANGE;
1277 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1279 /* Pre-process 'mr_buf' items for add/update of virtual port
1280 * ingress/egress mirroring (but not physical port ingress/egress
1286 for (i = 0; i < count; i++) {
1289 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1291 /* Validate specified VSI number, make sure it is less
1292 * than ICE_MAX_VSI, if not return with error.
1294 if (id >= ICE_MAX_VSI) {
1295 ice_debug(hw, ICE_DBG_SW,
1296 "Error VSI index (%u) out-of-range\n",
1298 ice_free(hw, mr_list);
1299 return ICE_ERR_OUT_OF_RANGE;
1302 /* add VSI to mirror rule */
1305 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1306 else /* remove VSI from mirror rule */
1307 mr_list[i] = CPU_TO_LE16(id);
1311 cmd = &desc.params.add_update_rule;
1312 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1313 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1314 ICE_AQC_RULE_ID_VALID_M);
1315 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1316 cmd->num_entries = CPU_TO_LE16(count);
1317 cmd->dest = CPU_TO_LE16(dest_vsi);
1319 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1321 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1323 ice_free(hw, mr_list);
1329 * ice_aq_delete_mir_rule - delete a mirror rule
1330 * @hw: pointer to the HW struct
1331 * @rule_id: Mirror rule ID (to be deleted)
1332 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1333 * otherwise it is returned to the shared pool
1334 * @cd: pointer to command details structure or NULL
1336 * Delete Mirror Rule (0x261).
1339 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1340 struct ice_sq_cd *cd)
1342 struct ice_aqc_delete_mir_rule *cmd;
1343 struct ice_aq_desc desc;
1345 /* rule_id should be in the range 0...63 */
1346 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1347 return ICE_ERR_OUT_OF_RANGE;
1349 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1351 cmd = &desc.params.del_rule;
1352 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1353 cmd->rule_id = CPU_TO_LE16(rule_id);
1356 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1358 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1362 * ice_aq_alloc_free_vsi_list
1363 * @hw: pointer to the HW struct
1364 * @vsi_list_id: VSI list ID returned or used for lookup
1365 * @lkup_type: switch rule filter lookup type
1366 * @opc: switch rules population command type - pass in the command opcode
1368 * allocates or free a VSI list resource
1370 static enum ice_status
1371 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1372 enum ice_sw_lkup_type lkup_type,
1373 enum ice_adminq_opc opc)
1375 struct ice_aqc_alloc_free_res_elem *sw_buf;
1376 struct ice_aqc_res_elem *vsi_ele;
1377 enum ice_status status;
1380 buf_len = sizeof(*sw_buf);
1381 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1382 ice_malloc(hw, buf_len);
1384 return ICE_ERR_NO_MEMORY;
1385 sw_buf->num_elems = CPU_TO_LE16(1);
1387 if (lkup_type == ICE_SW_LKUP_MAC ||
1388 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1389 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1390 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1391 lkup_type == ICE_SW_LKUP_PROMISC ||
1392 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1393 lkup_type == ICE_SW_LKUP_LAST) {
1394 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1395 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1397 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1399 status = ICE_ERR_PARAM;
1400 goto ice_aq_alloc_free_vsi_list_exit;
1403 if (opc == ice_aqc_opc_free_res)
1404 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1406 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1408 goto ice_aq_alloc_free_vsi_list_exit;
1410 if (opc == ice_aqc_opc_alloc_res) {
1411 vsi_ele = &sw_buf->elem[0];
1412 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1415 ice_aq_alloc_free_vsi_list_exit:
1416 ice_free(hw, sw_buf);
1421 * ice_aq_set_storm_ctrl - Sets storm control configuration
1422 * @hw: pointer to the HW struct
1423 * @bcast_thresh: represents the upper threshold for broadcast storm control
1424 * @mcast_thresh: represents the upper threshold for multicast storm control
1425 * @ctl_bitmask: storm control control knobs
1427 * Sets the storm control configuration (0x0280)
1430 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1433 struct ice_aqc_storm_cfg *cmd;
1434 struct ice_aq_desc desc;
1436 cmd = &desc.params.storm_conf;
1438 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1440 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1441 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1442 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1444 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1448 * ice_aq_get_storm_ctrl - gets storm control configuration
1449 * @hw: pointer to the HW struct
1450 * @bcast_thresh: represents the upper threshold for broadcast storm control
1451 * @mcast_thresh: represents the upper threshold for multicast storm control
1452 * @ctl_bitmask: storm control control knobs
1454 * Gets the storm control configuration (0x0281)
1457 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1460 enum ice_status status;
1461 struct ice_aq_desc desc;
1463 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1465 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1467 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1470 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1473 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1476 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1483 * ice_aq_sw_rules - add/update/remove switch rules
1484 * @hw: pointer to the HW struct
1485 * @rule_list: pointer to switch rule population list
1486 * @rule_list_sz: total size of the rule list in bytes
1487 * @num_rules: number of switch rules in the rule_list
1488 * @opc: switch rules population command type - pass in the command opcode
1489 * @cd: pointer to command details structure or NULL
1491 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1493 static enum ice_status
1494 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1495 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1497 struct ice_aq_desc desc;
1499 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1501 if (opc != ice_aqc_opc_add_sw_rules &&
1502 opc != ice_aqc_opc_update_sw_rules &&
1503 opc != ice_aqc_opc_remove_sw_rules)
1504 return ICE_ERR_PARAM;
1506 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1508 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1509 desc.params.sw_rules.num_rules_fltr_entry_index =
1510 CPU_TO_LE16(num_rules);
1511 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1515 * ice_aq_add_recipe - add switch recipe
1516 * @hw: pointer to the HW struct
1517 * @s_recipe_list: pointer to switch rule population list
1518 * @num_recipes: number of switch recipes in the list
1519 * @cd: pointer to command details structure or NULL
1524 ice_aq_add_recipe(struct ice_hw *hw,
1525 struct ice_aqc_recipe_data_elem *s_recipe_list,
1526 u16 num_recipes, struct ice_sq_cd *cd)
1528 struct ice_aqc_add_get_recipe *cmd;
1529 struct ice_aq_desc desc;
1532 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1533 cmd = &desc.params.add_get_recipe;
1534 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1536 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1537 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1539 buf_size = num_recipes * sizeof(*s_recipe_list);
1541 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1545 * ice_aq_get_recipe - get switch recipe
1546 * @hw: pointer to the HW struct
1547 * @s_recipe_list: pointer to switch rule population list
1548 * @num_recipes: pointer to the number of recipes (input and output)
1549 * @recipe_root: root recipe number of recipe(s) to retrieve
1550 * @cd: pointer to command details structure or NULL
1554 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1555 * On output, *num_recipes will equal the number of entries returned in
1558 * The caller must supply enough space in s_recipe_list to hold all possible
1559 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1562 ice_aq_get_recipe(struct ice_hw *hw,
1563 struct ice_aqc_recipe_data_elem *s_recipe_list,
1564 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1566 struct ice_aqc_add_get_recipe *cmd;
1567 struct ice_aq_desc desc;
1568 enum ice_status status;
1571 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1572 return ICE_ERR_PARAM;
1574 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1575 cmd = &desc.params.add_get_recipe;
1576 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1578 cmd->return_index = CPU_TO_LE16(recipe_root);
1579 cmd->num_sub_recipes = 0;
1581 buf_size = *num_recipes * sizeof(*s_recipe_list);
1583 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1584 /* cppcheck-suppress constArgument */
1585 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1591 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1592 * @hw: pointer to the HW struct
1593 * @profile_id: package profile ID to associate the recipe with
1594 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1595 * @cd: pointer to command details structure or NULL
1596 * Recipe to profile association (0x0291)
1599 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1600 struct ice_sq_cd *cd)
1602 struct ice_aqc_recipe_to_profile *cmd;
1603 struct ice_aq_desc desc;
1605 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1606 cmd = &desc.params.recipe_to_profile;
1607 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1608 cmd->profile_id = CPU_TO_LE16(profile_id);
1609 /* Set the recipe ID bit in the bitmask to let the device know which
1610 * profile we are associating the recipe to
1612 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1613 ICE_NONDMA_TO_NONDMA);
1615 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1619 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1620 * @hw: pointer to the HW struct
1621 * @profile_id: package profile ID to associate the recipe with
1622 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1623 * @cd: pointer to command details structure or NULL
1624 * Associate profile ID with given recipe (0x0293)
1627 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1628 struct ice_sq_cd *cd)
1630 struct ice_aqc_recipe_to_profile *cmd;
1631 struct ice_aq_desc desc;
1632 enum ice_status status;
1634 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1635 cmd = &desc.params.recipe_to_profile;
1636 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1637 cmd->profile_id = CPU_TO_LE16(profile_id);
1639 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1641 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1642 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1648 * ice_alloc_recipe - add recipe resource
1649 * @hw: pointer to the hardware structure
1650 * @rid: recipe ID returned as response to AQ call
1652 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1654 struct ice_aqc_alloc_free_res_elem *sw_buf;
1655 enum ice_status status;
1658 buf_len = sizeof(*sw_buf);
1659 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1661 return ICE_ERR_NO_MEMORY;
1663 sw_buf->num_elems = CPU_TO_LE16(1);
1664 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1665 ICE_AQC_RES_TYPE_S) |
1666 ICE_AQC_RES_TYPE_FLAG_SHARED);
1667 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1668 ice_aqc_opc_alloc_res, NULL);
1670 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1671 ice_free(hw, sw_buf);
1676 /* ice_init_port_info - Initialize port_info with switch configuration data
1677 * @pi: pointer to port_info
1678 * @vsi_port_num: VSI number or port number
1679 * @type: Type of switch element (port or VSI)
1680 * @swid: switch ID of the switch the element is attached to
1681 * @pf_vf_num: PF or VF number
1682 * @is_vf: true if the element is a VF, false otherwise
1685 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1686 u16 swid, u16 pf_vf_num, bool is_vf)
1689 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1690 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1692 pi->pf_vf_num = pf_vf_num;
1694 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1695 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1698 ice_debug(pi->hw, ICE_DBG_SW,
1699 "incorrect VSI/port type received\n");
1704 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1705 * @hw: pointer to the hardware structure
1707 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1709 struct ice_aqc_get_sw_cfg_resp *rbuf;
1710 enum ice_status status;
1711 u16 num_total_ports;
1717 num_total_ports = 1;
1719 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1720 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1723 return ICE_ERR_NO_MEMORY;
1725 /* Multiple calls to ice_aq_get_sw_cfg may be required
1726 * to get all the switch configuration information. The need
1727 * for additional calls is indicated by ice_aq_get_sw_cfg
1728 * writing a non-zero value in req_desc
1731 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1732 &req_desc, &num_elems, NULL);
1737 for (i = 0; i < num_elems; i++) {
1738 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1739 u16 pf_vf_num, swid, vsi_port_num;
1743 ele = rbuf[i].elements;
1744 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1745 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1747 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1748 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1750 swid = LE16_TO_CPU(ele->swid);
1752 if (LE16_TO_CPU(ele->pf_vf_num) &
1753 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1756 type = LE16_TO_CPU(ele->vsi_port_num) >>
1757 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1760 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1761 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1762 if (j == num_total_ports) {
1763 ice_debug(hw, ICE_DBG_SW,
1764 "more ports than expected\n");
1765 status = ICE_ERR_CFG;
1768 ice_init_port_info(hw->port_info,
1769 vsi_port_num, type, swid,
1777 } while (req_desc && !status);
1780 ice_free(hw, (void *)rbuf);
1785 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1786 * @hw: pointer to the hardware structure
1787 * @fi: filter info structure to fill/update
1789 * This helper function populates the lb_en and lan_en elements of the provided
1790 * ice_fltr_info struct using the switch's type and characteristics of the
1791 * switch rule being configured.
1793 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1797 if ((fi->flag & ICE_FLTR_TX) &&
1798 (fi->fltr_act == ICE_FWD_TO_VSI ||
1799 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1800 fi->fltr_act == ICE_FWD_TO_Q ||
1801 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1802 /* Setting LB for prune actions will result in replicated
1803 * packets to the internal switch that will be dropped.
1805 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1808 /* Set lan_en to TRUE if
1809 * 1. The switch is a VEB AND
1811 * 2.1 The lookup is a directional lookup like ethertype,
1812 * promiscuous, ethertype-MAC, promiscuous-VLAN
1813 * and default-port OR
1814 * 2.2 The lookup is VLAN, OR
1815 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1816 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1820 * The switch is a VEPA.
1822 * In all other cases, the LAN enable has to be set to false.
1825 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1826 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1827 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1828 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1829 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1830 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1831 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1832 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1833 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1834 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1843 * ice_fill_sw_rule - Helper function to fill switch rule structure
1844 * @hw: pointer to the hardware structure
1845 * @f_info: entry containing packet forwarding information
1846 * @s_rule: switch rule structure to be filled in based on mac_entry
1847 * @opc: switch rules population command type - pass in the command opcode
1850 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1851 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1853 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1861 if (opc == ice_aqc_opc_remove_sw_rules) {
1862 s_rule->pdata.lkup_tx_rx.act = 0;
1863 s_rule->pdata.lkup_tx_rx.index =
1864 CPU_TO_LE16(f_info->fltr_rule_id);
1865 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1869 eth_hdr_sz = sizeof(dummy_eth_header);
1870 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1872 /* initialize the ether header with a dummy header */
1873 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1874 ice_fill_sw_info(hw, f_info);
1876 switch (f_info->fltr_act) {
1877 case ICE_FWD_TO_VSI:
1878 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1879 ICE_SINGLE_ACT_VSI_ID_M;
1880 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1881 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1882 ICE_SINGLE_ACT_VALID_BIT;
1884 case ICE_FWD_TO_VSI_LIST:
1885 act |= ICE_SINGLE_ACT_VSI_LIST;
1886 act |= (f_info->fwd_id.vsi_list_id <<
1887 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1888 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1889 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1890 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1891 ICE_SINGLE_ACT_VALID_BIT;
1894 act |= ICE_SINGLE_ACT_TO_Q;
1895 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1896 ICE_SINGLE_ACT_Q_INDEX_M;
1898 case ICE_DROP_PACKET:
1899 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1900 ICE_SINGLE_ACT_VALID_BIT;
1902 case ICE_FWD_TO_QGRP:
1903 q_rgn = f_info->qgrp_size > 0 ?
1904 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1905 act |= ICE_SINGLE_ACT_TO_Q;
1906 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1907 ICE_SINGLE_ACT_Q_INDEX_M;
1908 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1909 ICE_SINGLE_ACT_Q_REGION_M;
1916 act |= ICE_SINGLE_ACT_LB_ENABLE;
1918 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1920 switch (f_info->lkup_type) {
1921 case ICE_SW_LKUP_MAC:
1922 daddr = f_info->l_data.mac.mac_addr;
1924 case ICE_SW_LKUP_VLAN:
1925 vlan_id = f_info->l_data.vlan.vlan_id;
1926 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1927 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1928 act |= ICE_SINGLE_ACT_PRUNE;
1929 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1932 case ICE_SW_LKUP_ETHERTYPE_MAC:
1933 daddr = f_info->l_data.ethertype_mac.mac_addr;
1935 case ICE_SW_LKUP_ETHERTYPE:
1936 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1937 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1939 case ICE_SW_LKUP_MAC_VLAN:
1940 daddr = f_info->l_data.mac_vlan.mac_addr;
1941 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1943 case ICE_SW_LKUP_PROMISC_VLAN:
1944 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1946 case ICE_SW_LKUP_PROMISC:
1947 daddr = f_info->l_data.mac_vlan.mac_addr;
1953 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1954 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1955 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1957 /* Recipe set depending on lookup type */
1958 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1959 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1960 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1963 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1964 ICE_NONDMA_TO_NONDMA);
1966 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1967 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1968 *off = CPU_TO_BE16(vlan_id);
1971 /* Create the switch rule with the final dummy Ethernet header */
1972 if (opc != ice_aqc_opc_update_sw_rules)
1973 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1977 * ice_add_marker_act
1978 * @hw: pointer to the hardware structure
1979 * @m_ent: the management entry for which sw marker needs to be added
1980 * @sw_marker: sw marker to tag the Rx descriptor with
1981 * @l_id: large action resource ID
1983 * Create a large action to hold software marker and update the switch rule
1984 * entry pointed by m_ent with newly created large action
1986 static enum ice_status
1987 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1988 u16 sw_marker, u16 l_id)
1990 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1991 /* For software marker we need 3 large actions
1992 * 1. FWD action: FWD TO VSI or VSI LIST
1993 * 2. GENERIC VALUE action to hold the profile ID
1994 * 3. GENERIC VALUE action to hold the software marker ID
1996 const u16 num_lg_acts = 3;
1997 enum ice_status status;
2003 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2004 return ICE_ERR_PARAM;
2006 /* Create two back-to-back switch rules and submit them to the HW using
2007 * one memory buffer:
2011 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2012 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2013 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2015 return ICE_ERR_NO_MEMORY;
2017 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2019 /* Fill in the first switch rule i.e. large action */
2020 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2021 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2022 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2024 /* First action VSI forwarding or VSI list forwarding depending on how
2027 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2028 m_ent->fltr_info.fwd_id.hw_vsi_id;
2030 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2031 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2032 ICE_LG_ACT_VSI_LIST_ID_M;
2033 if (m_ent->vsi_count > 1)
2034 act |= ICE_LG_ACT_VSI_LIST;
2035 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2037 /* Second action descriptor type */
2038 act = ICE_LG_ACT_GENERIC;
2040 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2041 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2043 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2044 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2046 /* Third action Marker value */
2047 act |= ICE_LG_ACT_GENERIC;
2048 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2049 ICE_LG_ACT_GENERIC_VALUE_M;
2051 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2053 /* call the fill switch rule to fill the lookup Tx Rx structure */
2054 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2055 ice_aqc_opc_update_sw_rules);
2057 /* Update the action to point to the large action ID */
2058 rx_tx->pdata.lkup_tx_rx.act =
2059 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2060 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2061 ICE_SINGLE_ACT_PTR_VAL_M));
2063 /* Use the filter rule ID of the previously created rule with single
2064 * act. Once the update happens, hardware will treat this as large
2067 rx_tx->pdata.lkup_tx_rx.index =
2068 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2070 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2071 ice_aqc_opc_update_sw_rules, NULL);
2073 m_ent->lg_act_idx = l_id;
2074 m_ent->sw_marker_id = sw_marker;
2077 ice_free(hw, lg_act);
2082 * ice_add_counter_act - add/update filter rule with counter action
2083 * @hw: pointer to the hardware structure
2084 * @m_ent: the management entry for which counter needs to be added
2085 * @counter_id: VLAN counter ID returned as part of allocate resource
2086 * @l_id: large action resource ID
2088 static enum ice_status
2089 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2090 u16 counter_id, u16 l_id)
2092 struct ice_aqc_sw_rules_elem *lg_act;
2093 struct ice_aqc_sw_rules_elem *rx_tx;
2094 enum ice_status status;
2095 /* 2 actions will be added while adding a large action counter */
2096 const int num_acts = 2;
2103 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2104 return ICE_ERR_PARAM;
2106 /* Create two back-to-back switch rules and submit them to the HW using
2107 * one memory buffer:
2111 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2112 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2113 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2116 return ICE_ERR_NO_MEMORY;
2118 rx_tx = (struct ice_aqc_sw_rules_elem *)
2119 ((u8 *)lg_act + lg_act_size);
2121 /* Fill in the first switch rule i.e. large action */
2122 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2123 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2124 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2126 /* First action VSI forwarding or VSI list forwarding depending on how
2129 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2130 m_ent->fltr_info.fwd_id.hw_vsi_id;
2132 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2133 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2134 ICE_LG_ACT_VSI_LIST_ID_M;
2135 if (m_ent->vsi_count > 1)
2136 act |= ICE_LG_ACT_VSI_LIST;
2137 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2139 /* Second action counter ID */
2140 act = ICE_LG_ACT_STAT_COUNT;
2141 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2142 ICE_LG_ACT_STAT_COUNT_M;
2143 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2145 /* call the fill switch rule to fill the lookup Tx Rx structure */
2146 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2147 ice_aqc_opc_update_sw_rules);
2149 act = ICE_SINGLE_ACT_PTR;
2150 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2151 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2153 /* Use the filter rule ID of the previously created rule with single
2154 * act. Once the update happens, hardware will treat this as large
2157 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2158 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2160 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2161 ice_aqc_opc_update_sw_rules, NULL);
2163 m_ent->lg_act_idx = l_id;
2164 m_ent->counter_index = counter_id;
2167 ice_free(hw, lg_act);
2172 * ice_create_vsi_list_map
2173 * @hw: pointer to the hardware structure
2174 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2175 * @num_vsi: number of VSI handles in the array
2176 * @vsi_list_id: VSI list ID generated as part of allocate resource
2178 * Helper function to create a new entry of VSI list ID to VSI mapping
2179 * using the given VSI list ID
2181 static struct ice_vsi_list_map_info *
2182 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2185 struct ice_switch_info *sw = hw->switch_info;
2186 struct ice_vsi_list_map_info *v_map;
2189 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2194 v_map->vsi_list_id = vsi_list_id;
2196 for (i = 0; i < num_vsi; i++)
2197 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2199 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2204 * ice_update_vsi_list_rule
2205 * @hw: pointer to the hardware structure
2206 * @vsi_handle_arr: array of VSI handles to form a VSI list
2207 * @num_vsi: number of VSI handles in the array
2208 * @vsi_list_id: VSI list ID generated as part of allocate resource
2209 * @remove: Boolean value to indicate if this is a remove action
2210 * @opc: switch rules population command type - pass in the command opcode
2211 * @lkup_type: lookup type of the filter
2213 * Call AQ command to add a new switch rule or update existing switch rule
2214 * using the given VSI list ID
2216 static enum ice_status
2217 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2218 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2219 enum ice_sw_lkup_type lkup_type)
2221 struct ice_aqc_sw_rules_elem *s_rule;
2222 enum ice_status status;
2228 return ICE_ERR_PARAM;
2230 if (lkup_type == ICE_SW_LKUP_MAC ||
2231 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2232 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2233 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2234 lkup_type == ICE_SW_LKUP_PROMISC ||
2235 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2236 lkup_type == ICE_SW_LKUP_LAST)
2237 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2238 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2239 else if (lkup_type == ICE_SW_LKUP_VLAN)
2240 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2241 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2243 return ICE_ERR_PARAM;
2245 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2246 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2248 return ICE_ERR_NO_MEMORY;
2249 for (i = 0; i < num_vsi; i++) {
2250 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2251 status = ICE_ERR_PARAM;
2254 /* AQ call requires hw_vsi_id(s) */
2255 s_rule->pdata.vsi_list.vsi[i] =
2256 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2259 s_rule->type = CPU_TO_LE16(type);
2260 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2261 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2263 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2266 ice_free(hw, s_rule);
2271 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2272 * @hw: pointer to the HW struct
2273 * @vsi_handle_arr: array of VSI handles to form a VSI list
2274 * @num_vsi: number of VSI handles in the array
2275 * @vsi_list_id: stores the ID of the VSI list to be created
2276 * @lkup_type: switch rule filter's lookup type
/* Allocate a fresh VSI list resource from FW and populate it with the given
 * VSI handles.  On success *vsi_list_id holds the newly allocated list ID.
 * NOTE(review): this extract is missing source lines (gaps in embedded
 * numbering) - the error check after the alloc call is not visible here.
 */
2278 static enum ice_status
2279 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2280 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2282 enum ice_status status;
/* Ask FW to allocate the VSI list resource; fills in *vsi_list_id */
2284 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2285 ice_aqc_opc_alloc_res);
2289 /* Update the newly created VSI list to include the specified VSIs */
2290 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2291 *vsi_list_id, false,
2292 ice_aqc_opc_add_sw_rules, lkup_type);
2296 * ice_create_pkt_fwd_rule
2297 * @hw: pointer to the hardware structure
2298 * @f_entry: entry containing packet forwarding information
2300 * Create switch rule with given filter information and add an entry
2301 * to the corresponding filter management list to track this switch rule
/* Create one packet-forwarding switch rule via AQ and a matching management
 * (book-keeping) entry on the recipe's filt_rules list.  The rule ID returned
 * by FW is copied into both f_entry and the new management entry.
 * NOTE(review): extract has gaps - the NULL checks guarding the bare
 * "return ICE_ERR_NO_MEMORY" / "goto" statements are not visible here.
 */
2304 static enum ice_status
2305 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2306 struct ice_fltr_list_entry *f_entry)
2308 struct ice_fltr_mgmt_list_entry *fm_entry;
2309 struct ice_aqc_sw_rules_elem *s_rule;
2310 enum ice_sw_lkup_type l_type;
2311 struct ice_sw_recipe *recp;
2312 enum ice_status status;
/* Buffer for the AQ rule: rx/tx lookup element + dummy ethernet header */
2314 s_rule = (struct ice_aqc_sw_rules_elem *)
2315 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2317 return ICE_ERR_NO_MEMORY;
2318 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2319 ice_malloc(hw, sizeof(*fm_entry));
2321 status = ICE_ERR_NO_MEMORY;
2322 goto ice_create_pkt_fwd_rule_exit;
2325 fm_entry->fltr_info = f_entry->fltr_info;
2327 /* Initialize all the fields for the management entry */
2328 fm_entry->vsi_count = 1;
2329 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2330 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2331 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
/* Serialize the filter info into the AQ switch-rule element */
2333 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2334 ice_aqc_opc_add_sw_rules);
2336 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2337 ice_aqc_opc_add_sw_rules, NULL);
/* on AQ failure (condition not visible in extract) drop the mgmt entry */
2339 ice_free(hw, fm_entry);
2340 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the rule index; record it in both caller's and mgmt copies */
2343 f_entry->fltr_info.fltr_rule_id =
2344 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2345 fm_entry->fltr_info.fltr_rule_id =
2346 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2348 /* The book keeping entries will get removed when base driver
2349 * calls remove filter AQ command
2351 l_type = fm_entry->fltr_info.lkup_type;
2352 recp = &hw->switch_info->recp_list[l_type];
2353 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2355 ice_create_pkt_fwd_rule_exit:
2356 ice_free(hw, s_rule);
2361 * ice_update_pkt_fwd_rule
2362 * @hw: pointer to the hardware structure
2363 * @f_info: filter information for switch rule
2365 * Call AQ command to update a previously created switch rule with a
/* Re-send a previously created switch rule (identified by fltr_rule_id) to
 * FW via the update-sw-rules AQ opcode so its action/forwarding info takes
 * the values now in f_info.  NOTE(review): extract has gaps - the NULL check
 * guarding "return ICE_ERR_NO_MEMORY" is not visible here.
 */
2368 static enum ice_status
2369 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2371 struct ice_aqc_sw_rules_elem *s_rule;
2372 enum ice_status status;
2374 s_rule = (struct ice_aqc_sw_rules_elem *)
2375 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2377 return ICE_ERR_NO_MEMORY;
2379 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* updates address the existing rule by its FW-assigned index */
2381 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2383 /* Update switch rule with new rule set to forward VSI list */
2384 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2385 ice_aqc_opc_update_sw_rules, NULL);
2387 ice_free(hw, s_rule);
2392 * ice_update_sw_rule_bridge_mode
2393 * @hw: pointer to the HW struct
2395 * Updates unicast switch filter rules based on VEB/VEPA mode
/* Walk the MAC-recipe filter list under its lock and re-push every unicast
 * Tx forwarding rule to FW, so the rules reflect the current bridge
 * (VEB/VEPA) mode.  NOTE(review): extract has gaps - loop-exit-on-error
 * handling between the update call and the unlock is not visible here.
 */
2397 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2399 struct ice_switch_info *sw = hw->switch_info;
2400 struct ice_fltr_mgmt_list_entry *fm_entry;
2401 enum ice_status status = ICE_SUCCESS;
2402 struct LIST_HEAD_TYPE *rule_head;
2403 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2405 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2406 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2408 ice_acquire_lock(rule_lock);
2409 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2411 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2412 u8 *addr = fi->l_data.mac.mac_addr;
2414 /* Update unicast Tx rules to reflect the selected
/* only Tx + unicast + forwarding actions need the bridge-mode refresh */
2417 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2418 (fi->fltr_act == ICE_FWD_TO_VSI ||
2419 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2420 fi->fltr_act == ICE_FWD_TO_Q ||
2421 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2422 status = ice_update_pkt_fwd_rule(hw, fi);
2428 ice_release_lock(rule_lock);
2434 * ice_add_update_vsi_list
2435 * @hw: pointer to the hardware structure
2436 * @m_entry: pointer to current filter management list entry
2437 * @cur_fltr: filter information from the book keeping entry
2438 * @new_fltr: filter information with the new VSI to be added
2440 * Call AQ command to add or update previously created VSI list with new VSI.
2442 * Helper function to do book keeping associated with adding filter information
2443 * The algorithm to do the book keeping is described below :
2444 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2445 * if only one VSI has been added till now
2446 * Allocate a new VSI list and add two VSIs
2447 * to this list using switch rule command
2448 * Update the previously created switch rule with the
2449 * newly created VSI list ID
2450 * if a VSI list was previously created
2451 * Add the new VSI to the previously created VSI list set
2452 * using the update switch rule command
/* Subscribe a new VSI to an existing filter.  Two paths:
 *  - first subscriber was a plain FWD_TO_VSI rule: build a 2-entry VSI list
 *    (old VSI + new VSI), convert the rule to FWD_TO_VSI_LIST, and create
 *    the list map;
 *  - a VSI list already exists: just add the new VSI to it via the update
 *    AQ opcode and set its bit in the map.
 * Queue/queue-group actions cannot be combined with VSI lists (NOT_IMPL).
 * NOTE(review): extract has gaps - several error-check/brace lines between
 * the visible statements are missing; verify against the full file.
 */
2454 static enum ice_status
2455 ice_add_update_vsi_list(struct ice_hw *hw,
2456 struct ice_fltr_mgmt_list_entry *m_entry,
2457 struct ice_fltr_info *cur_fltr,
2458 struct ice_fltr_info *new_fltr)
2460 enum ice_status status = ICE_SUCCESS;
2461 u16 vsi_list_id = 0;
2463 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2464 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2465 return ICE_ERR_NOT_IMPL;
2467 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2468 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2469 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2470 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2471 return ICE_ERR_NOT_IMPL;
2473 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2474 /* Only one entry existed in the mapping and it was not already
2475 * a part of a VSI list. So, create a VSI list with the old and
2478 struct ice_fltr_info tmp_fltr;
2479 u16 vsi_handle_arr[2];
2481 /* A rule already exists with the new VSI being added */
2482 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2483 return ICE_ERR_ALREADY_EXISTS;
2485 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2486 vsi_handle_arr[1] = new_fltr->vsi_handle;
2487 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2489 new_fltr->lkup_type);
2493 tmp_fltr = *new_fltr;
2494 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2495 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2496 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2497 /* Update the previous switch rule of "MAC forward to VSI" to
2498 * "MAC fwd to VSI list"
2500 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* keep book-keeping in sync with what FW now holds */
2504 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2505 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2506 m_entry->vsi_list_info =
2507 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2510 /* If this entry was large action then the large action needs
2511 * to be updated to point to FWD to VSI list
2513 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2515 ice_add_marker_act(hw, m_entry,
2516 m_entry->sw_marker_id,
2517 m_entry->lg_act_idx);
2519 u16 vsi_handle = new_fltr->vsi_handle;
2520 enum ice_adminq_opc opcode;
2522 if (!m_entry->vsi_list_info)
2525 /* A rule already exists with the new VSI being added */
2526 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2529 /* Update the previously created VSI list set with
2530 * the new VSI ID passed in
2532 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2533 opcode = ice_aqc_opc_update_sw_rules;
2535 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2536 vsi_list_id, false, opcode,
2537 new_fltr->lkup_type);
2538 /* update VSI list mapping info with new VSI ID */
2540 ice_set_bit(vsi_handle,
2541 m_entry->vsi_list_info->vsi_map);
2544 m_entry->vsi_count++;
2549 * ice_find_rule_entry - Search a rule entry
2550 * @hw: pointer to the hardware structure
2551 * @recp_id: lookup type for which the specified rule needs to be searched
2552 * @f_info: rule information
2554 * Helper function to search for a given rule entry
2555 * Returns pointer to entry storing the rule if found
/* Linear search of the recipe's filt_rules list for an entry whose lookup
 * data (l_data) and flag match f_info.  Returns the matching management
 * entry or NULL.  Caller is expected to hold the recipe's filt_rule_lock
 * (presumably - locking not visible in this extract; confirm at call sites).
 */
2557 static struct ice_fltr_mgmt_list_entry *
2558 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2560 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2561 struct ice_switch_info *sw = hw->switch_info;
2562 struct LIST_HEAD_TYPE *list_head;
2564 list_head = &sw->recp_list[recp_id].filt_rules;
2565 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* match on raw lookup data bytes plus Rx/Tx flag */
2567 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2568 sizeof(f_info->l_data)) &&
2569 f_info->flag == list_itr->fltr_info.flag) {
2578 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2579 * @hw: pointer to the hardware structure
2580 * @recp_id: lookup type for which VSI lists needs to be searched
2581 * @vsi_handle: VSI handle to be found in VSI list
2582 * @vsi_list_id: VSI list ID found containing vsi_handle
2584 * Helper function to search a VSI list with single entry containing given VSI
2585 * handle element. This can be extended further to search VSI list with more
2586 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* Search the recipe's rule list for a VSI list that contains vsi_handle.
 * Handles both advanced-rule entries (adv_rule set on the recipe) and
 * regular entries; for regular entries only lists with vsi_count == 1 are
 * considered.  On a hit *vsi_list_id is filled in and the map is returned;
 * returns NULL otherwise.
 */
2588 static struct ice_vsi_list_map_info *
2589 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2592 struct ice_vsi_list_map_info *map_info = NULL;
2593 struct ice_switch_info *sw = hw->switch_info;
2594 struct LIST_HEAD_TYPE *list_head;
2596 list_head = &sw->recp_list[recp_id].filt_rules;
2597 if (sw->recp_list[recp_id].adv_rule) {
2598 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2600 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2601 ice_adv_fltr_mgmt_list_entry,
2603 if (list_itr->vsi_list_info) {
2604 map_info = list_itr->vsi_list_info;
2605 if (ice_is_bit_set(map_info->vsi_map,
2607 *vsi_list_id = map_info->vsi_list_id;
2613 struct ice_fltr_mgmt_list_entry *list_itr;
2615 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2616 ice_fltr_mgmt_list_entry,
/* regular path: only single-VSI lists are candidates for reuse */
2618 if (list_itr->vsi_count == 1 &&
2619 list_itr->vsi_list_info) {
2620 map_info = list_itr->vsi_list_info;
2621 if (ice_is_bit_set(map_info->vsi_map,
2623 *vsi_list_id = map_info->vsi_list_id;
2633 * ice_add_rule_internal - add rule for a given lookup type
2634 * @hw: pointer to the hardware structure
2635 * @recp_id: lookup type (recipe ID) for which rule has to be added
2636 * @f_entry: structure containing MAC forwarding information
2638 * Adds or updates the rule lists for a given recipe
/* Add or extend a rule for one recipe: validate the VSI handle, resolve the
 * HW VSI number, fix up the source field from the Rx/Tx flag, then either
 * create a brand-new forwarding rule (no existing match) or fold the new VSI
 * into the existing rule's VSI list.  All list work is done under the
 * recipe's filt_rule_lock.
 */
2640 static enum ice_status
2641 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2642 struct ice_fltr_list_entry *f_entry)
2644 struct ice_switch_info *sw = hw->switch_info;
2645 struct ice_fltr_info *new_fltr, *cur_fltr;
2646 struct ice_fltr_mgmt_list_entry *m_entry;
2647 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2648 enum ice_status status = ICE_SUCCESS;
2650 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2651 return ICE_ERR_PARAM;
2653 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2654 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2655 f_entry->fltr_info.fwd_id.hw_vsi_id =
2656 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2658 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2660 ice_acquire_lock(rule_lock);
2661 new_fltr = &f_entry->fltr_info;
/* Rx rules source from the port; Tx rules source from the VSI */
2662 if (new_fltr->flag & ICE_FLTR_RX)
2663 new_fltr->src = hw->port_info->lport;
2664 else if (new_fltr->flag & ICE_FLTR_TX)
2666 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2668 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
/* no existing rule (check not visible in extract): create one */
2670 status = ice_create_pkt_fwd_rule(hw, f_entry);
2671 goto exit_add_rule_internal;
2674 cur_fltr = &m_entry->fltr_info;
2675 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2677 exit_add_rule_internal:
2678 ice_release_lock(rule_lock);
2683 * ice_remove_vsi_list_rule
2684 * @hw: pointer to the hardware structure
2685 * @vsi_list_id: VSI list ID generated as part of allocate resource
2686 * @lkup_type: switch rule filter lookup type
2688 * The VSI list should be emptied before this function is called to remove the
/* Free a VSI list resource back to FW.  The list must already be empty.
 * NOTE(review): extract has gaps - a VSI_LIST_CLEAR rule buffer is built
 * here but the ice_aq_sw_rules() call that would send it is not visible in
 * this extract; only the free-resource AQ call appears.  Verify against the
 * full file before drawing conclusions.
 */
2691 static enum ice_status
2692 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2693 enum ice_sw_lkup_type lkup_type)
2695 struct ice_aqc_sw_rules_elem *s_rule;
2696 enum ice_status status;
2699 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2700 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2702 return ICE_ERR_NO_MEMORY;
2704 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2705 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2707 /* Free the vsi_list resource that we allocated. It is assumed that the
2708 * list is empty at this point.
2710 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2711 ice_aqc_opc_free_res);
2713 ice_free(hw, s_rule);
2718 * ice_rem_update_vsi_list
2719 * @hw: pointer to the hardware structure
2720 * @vsi_handle: VSI handle of the VSI to remove
2721 * @fm_list: filter management entry for which the VSI list management needs to
/* Remove one VSI from a rule's VSI list and keep HW/book-keeping coherent:
 *  - drop the VSI from the FW list and clear its bit in the local map;
 *  - non-VLAN rule left with exactly one VSI: demote the rule back to a
 *    plain FWD_TO_VSI on the remaining VSI, then delete the now-unused list;
 *  - VLAN rule drained to zero VSIs: delete the list (VLAN pruning rules
 *    always use a list, so no demotion).
 * NOTE(review): extract has gaps - several error-return lines between the
 * visible statements are missing.
 */
2724 static enum ice_status
2725 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2726 struct ice_fltr_mgmt_list_entry *fm_list)
2728 enum ice_sw_lkup_type lkup_type;
2729 enum ice_status status = ICE_SUCCESS;
2732 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2733 fm_list->vsi_count == 0)
2734 return ICE_ERR_PARAM;
2736 /* A rule with the VSI being removed does not exist */
2737 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2738 return ICE_ERR_DOES_NOT_EXIST;
2740 lkup_type = fm_list->fltr_info.lkup_type;
2741 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* "true" = remove the VSI from the FW-side list */
2742 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2743 ice_aqc_opc_update_sw_rules,
2748 fm_list->vsi_count--;
2749 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2751 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2752 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2753 struct ice_vsi_list_map_info *vsi_list_info =
2754 fm_list->vsi_list_info;
/* find the single VSI still left in the list */
2757 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2759 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2760 return ICE_ERR_OUT_OF_RANGE;
2762 /* Make sure VSI list is empty before removing it below */
2763 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2765 ice_aqc_opc_update_sw_rules,
/* demote FWD_TO_VSI_LIST back to plain FWD_TO_VSI on the survivor */
2770 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2771 tmp_fltr_info.fwd_id.hw_vsi_id =
2772 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2773 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2774 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2776 ice_debug(hw, ICE_DBG_SW,
2777 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2778 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2782 fm_list->fltr_info = tmp_fltr_info;
2785 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2786 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2787 struct ice_vsi_list_map_info *vsi_list_info =
2788 fm_list->vsi_list_info;
2790 /* Remove the VSI list since it is no longer used */
2791 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2793 ice_debug(hw, ICE_DBG_SW,
2794 "Failed to remove VSI list %d, error %d\n",
2795 vsi_list_id, status);
2799 LIST_DEL(&vsi_list_info->list_entry);
2800 ice_free(hw, vsi_list_info);
2801 fm_list->vsi_list_info = NULL;
2808 * ice_remove_rule_internal - Remove a filter rule of a given type
2810 * @hw: pointer to the hardware structure
2811 * @recp_id: recipe ID for which the rule needs to removed
2812 * @f_entry: rule entry containing filter information
/* Remove one VSI's subscription to a rule of the given recipe.  Decision
 * tree on the found management entry:
 *  - not FWD_TO_VSI_LIST: remove the whole rule;
 *  - list shared by other rules (ref_cnt > 1): just drop our reference;
 *  - otherwise detach this VSI via ice_rem_update_vsi_list() and remove the
 *    rule only when its vsi_count hits zero.
 * When removal is needed, sends a remove-sw-rules AQ command and deletes the
 * book-keeping entry.  NOTE(review): extract has gaps - some gotos/status
 * assignments between visible statements are missing.
 */
2814 static enum ice_status
2815 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2816 struct ice_fltr_list_entry *f_entry)
2818 struct ice_switch_info *sw = hw->switch_info;
2819 struct ice_fltr_mgmt_list_entry *list_elem;
2820 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2821 enum ice_status status = ICE_SUCCESS;
2822 bool remove_rule = false;
2825 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2826 return ICE_ERR_PARAM;
2827 f_entry->fltr_info.fwd_id.hw_vsi_id =
2828 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2830 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2831 ice_acquire_lock(rule_lock);
2832 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2834 status = ICE_ERR_DOES_NOT_EXIST;
2838 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2840 } else if (!list_elem->vsi_list_info) {
2841 status = ICE_ERR_DOES_NOT_EXIST;
2843 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2844 /* a ref_cnt > 1 indicates that the vsi_list is being
2845 * shared by multiple rules. Decrement the ref_cnt and
2846 * remove this rule, but do not modify the list, as it
2847 * is in-use by other rules.
2849 list_elem->vsi_list_info->ref_cnt--;
2852 /* a ref_cnt of 1 indicates the vsi_list is only used
2853 * by one rule. However, the original removal request is only
2854 * for a single VSI. Update the vsi_list first, and only
2855 * remove the rule if there are no further VSIs in this list.
2857 vsi_handle = f_entry->fltr_info.vsi_handle;
2858 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2861 /* if VSI count goes to zero after updating the VSI list */
2862 if (list_elem->vsi_count == 0)
2867 /* Remove the lookup rule */
2868 struct ice_aqc_sw_rules_elem *s_rule;
/* removal needs no dummy packet header, hence the NO_HDR size */
2870 s_rule = (struct ice_aqc_sw_rules_elem *)
2871 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2873 status = ICE_ERR_NO_MEMORY;
2877 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2878 ice_aqc_opc_remove_sw_rules);
2880 status = ice_aq_sw_rules(hw, s_rule,
2881 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2882 ice_aqc_opc_remove_sw_rules, NULL);
2884 /* Remove a book keeping from the list */
2885 ice_free(hw, s_rule);
2890 LIST_DEL(&list_elem->list_entry);
2891 ice_free(hw, list_elem);
2894 ice_release_lock(rule_lock);
2899 * ice_aq_get_res_alloc - get allocated resources
2900 * @hw: pointer to the HW struct
2901 * @num_entries: pointer to u16 to store the number of resource entries returned
2902 * @buf: pointer to user-supplied buffer
2903 * @buf_size: size of buff
2904 * @cd: pointer to command details structure or NULL
2906 * The user-supplied buffer must be large enough to store the resource
2907 * information for all resource types. Each resource type is an
2908 * ice_aqc_get_res_resp_data_elem structure.
/* Issue the get-res-alloc AQ command; FW writes one
 * ice_aqc_get_res_resp_data_elem per resource type into 'buf'.  On success,
 * *num_entries (if provided) is set from the response element count.
 * Rejects a buffer smaller than ICE_AQ_GET_RES_ALLOC_BUF_LEN.
 */
2911 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2912 u16 buf_size, struct ice_sq_cd *cd)
2914 struct ice_aqc_get_res_alloc *resp;
2915 enum ice_status status;
2916 struct ice_aq_desc desc;
2919 return ICE_ERR_BAD_PTR;
2921 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2922 return ICE_ERR_INVAL_SIZE;
2924 resp = &desc.params.get_res;
2926 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2927 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on AQ success */
2929 if (!status && num_entries)
2930 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2936 * ice_aq_get_res_descs - get allocated resource descriptors
2937 * @hw: pointer to the hardware structure
2938 * @num_entries: number of resource entries in buffer
2939 * @buf: Indirect buffer to hold data parameters and response
2940 * @buf_size: size of buffer for indirect commands
2941 * @res_type: resource type
2942 * @res_shared: is resource shared
2943 * @desc_id: input - first desc ID to start; output - next desc ID
2944 * @cd: pointer to command details structure or NULL
/* Fetch allocated resource descriptors of one resource type, starting at
 * *desc_id.  buf_size must be exactly num_entries * sizeof(*buf).  On
 * return *desc_id is advanced to FW's next_desc so callers can page through
 * all descriptors.
 */
2947 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2948 struct ice_aqc_get_allocd_res_desc_resp *buf,
2949 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2950 struct ice_sq_cd *cd)
2952 struct ice_aqc_get_allocd_res_desc *cmd;
2953 struct ice_aq_desc desc;
2954 enum ice_status status;
2956 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2958 cmd = &desc.params.get_res_desc;
2961 return ICE_ERR_PARAM;
2963 if (buf_size != (num_entries * sizeof(*buf)))
2964 return ICE_ERR_PARAM;
2966 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* encode resource type plus the shared/dedicated flag into one field */
2968 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2969 ICE_AQC_RES_TYPE_M) | (res_shared ?
2970 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2971 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2973 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* NOTE(review): *desc_id update appears unconditional in this extract;
 * confirm whether it is guarded by !status in the full source.
 */
2975 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2981 * ice_add_mac - Add a MAC address based filter rule
2982 * @hw: pointer to the hardware structure
2983 * @m_list: list of MAC addresses and forwarding information
2985 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2986 * multiple unicast addresses, the function assumes that all the
2987 * addresses are unique in a given add_mac call. It doesn't
2988 * check for duplicates in this case, removing duplicates from a given
2989 * list should be taken care of in the caller of this function.
/* Add MAC filter rules from m_list.  Three-phase algorithm:
 *  1) validate every entry (valid VSI, VSI source ID, MAC lookup type,
 *     non-zero MAC); multicast (and shared unicast) entries are added
 *     one-by-one via ice_add_rule_internal(); non-shared unicast entries
 *     are deduplicated and counted for bulk add;
 *  2) build one AQ buffer with all unicast rules and send it in
 *     ICE_AQ_MAX_BUF_LEN-sized chunks;
 *  3) walk the list again, record the FW-assigned rule IDs and create a
 *     management entry per unicast rule.
 * NOTE(review): extract has gaps - several guard conditions and the
 * num_unicast accounting lines are missing from this view.
 */
2992 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2994 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2995 struct ice_fltr_list_entry *m_list_itr;
2996 struct LIST_HEAD_TYPE *rule_head;
2997 u16 elem_sent, total_elem_left;
2998 struct ice_switch_info *sw;
2999 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3000 enum ice_status status = ICE_SUCCESS;
3001 u16 num_unicast = 0;
3005 return ICE_ERR_PARAM;
3007 sw = hw->switch_info;
3008 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Phase 1: validate entries and add multicast/shared-unicast directly */
3009 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3011 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3015 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3016 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3017 if (!ice_is_vsi_valid(hw, vsi_handle))
3018 return ICE_ERR_PARAM;
3019 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3020 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3021 /* update the src in case it is VSI num */
3022 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3023 return ICE_ERR_PARAM;
3024 m_list_itr->fltr_info.src = hw_vsi_id;
3025 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3026 IS_ZERO_ETHER_ADDR(add))
3027 return ICE_ERR_PARAM;
3028 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3029 /* Don't overwrite the unicast address */
3030 ice_acquire_lock(rule_lock);
3031 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3032 &m_list_itr->fltr_info)) {
3033 ice_release_lock(rule_lock);
3034 return ICE_ERR_ALREADY_EXISTS;
3036 ice_release_lock(rule_lock);
3038 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3039 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3040 m_list_itr->status =
3041 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3043 if (m_list_itr->status)
3044 return m_list_itr->status;
3048 ice_acquire_lock(rule_lock);
3049 /* Exit if no suitable entries were found for adding bulk switch rule */
3051 status = ICE_SUCCESS;
3052 goto ice_add_mac_exit;
3055 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3057 /* Allocate switch rule buffer for the bulk update for unicast */
3058 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3059 s_rule = (struct ice_aqc_sw_rules_elem *)
3060 ice_calloc(hw, num_unicast, s_rule_size);
3062 status = ICE_ERR_NO_MEMORY;
3063 goto ice_add_mac_exit;
/* Phase 2a: serialize each unicast rule into the bulk buffer */
3067 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3069 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3070 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3072 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3073 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3074 ice_aqc_opc_add_sw_rules);
3075 r_iter = (struct ice_aqc_sw_rules_elem *)
3076 ((u8 *)r_iter + s_rule_size);
3080 /* Call AQ bulk switch rule update for all unicast addresses */
3082 /* Call AQ switch rule in AQ_MAX chunk */
3083 for (total_elem_left = num_unicast; total_elem_left > 0;
3084 total_elem_left -= elem_sent) {
3085 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* cap each AQ send at the max indirect buffer size */
3087 elem_sent = min(total_elem_left,
3088 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3089 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3090 elem_sent, ice_aqc_opc_add_sw_rules,
3093 goto ice_add_mac_exit;
3094 r_iter = (struct ice_aqc_sw_rules_elem *)
3095 ((u8 *)r_iter + (elem_sent * s_rule_size));
3098 /* Fill up rule ID based on the value returned from FW */
3100 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3102 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3103 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3104 struct ice_fltr_mgmt_list_entry *fm_entry;
3106 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3107 f_info->fltr_rule_id =
3108 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3109 f_info->fltr_act = ICE_FWD_TO_VSI;
3110 /* Create an entry to track this MAC address */
3111 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3112 ice_malloc(hw, sizeof(*fm_entry));
3114 status = ICE_ERR_NO_MEMORY;
3115 goto ice_add_mac_exit;
3117 fm_entry->fltr_info = *f_info;
3118 fm_entry->vsi_count = 1;
3119 /* The book keeping entries will get removed when
3120 * base driver calls remove filter AQ command
3123 LIST_ADD(&fm_entry->list_entry, rule_head);
3124 r_iter = (struct ice_aqc_sw_rules_elem *)
3125 ((u8 *)r_iter + s_rule_size);
3130 ice_release_lock(rule_lock);
3132 ice_free(hw, s_rule);
3137 * ice_add_vlan_internal - Add one VLAN based filter rule
3138 * @hw: pointer to the hardware structure
3139 * @f_entry: filter entry containing one VLAN information
/* Add one VLAN pruning rule.  VLAN rules always forward to a VSI list:
 *  - no existing rule for this VLAN: reuse a single-VSI list already
 *    containing this VSI (bump its ref_cnt) or create a new list, convert
 *    the action to FWD_TO_VSI_LIST and create the forwarding rule;
 *  - rule exists and its list has ref_cnt == 1: append this VSI to it;
 *  - rule exists but its list is shared (ref_cnt > 1): build a new 2-entry
 *    list (old VSI + new VSI), repoint the rule at it and drop the old
 *    list's ref_cnt.
 * VLAN ID is validated to 12 bits; source must be the VSI.
 * NOTE(review): extract has gaps - several error checks and goto targets
 * between the visible statements are missing.
 */
3141 static enum ice_status
3142 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3144 struct ice_switch_info *sw = hw->switch_info;
3145 struct ice_fltr_mgmt_list_entry *v_list_itr;
3146 struct ice_fltr_info *new_fltr, *cur_fltr;
3147 enum ice_sw_lkup_type lkup_type;
3148 u16 vsi_list_id = 0, vsi_handle;
3149 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3150 enum ice_status status = ICE_SUCCESS;
3152 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3153 return ICE_ERR_PARAM;
3155 f_entry->fltr_info.fwd_id.hw_vsi_id =
3156 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3157 new_fltr = &f_entry->fltr_info;
3159 /* VLAN ID should only be 12 bits */
3160 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3161 return ICE_ERR_PARAM;
3163 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3164 return ICE_ERR_PARAM;
3166 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3167 lkup_type = new_fltr->lkup_type;
3168 vsi_handle = new_fltr->vsi_handle;
3169 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3170 ice_acquire_lock(rule_lock);
3171 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3173 struct ice_vsi_list_map_info *map_info = NULL;
3175 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3176 /* All VLAN pruning rules use a VSI list. Check if
3177 * there is already a VSI list containing VSI that we
3178 * want to add. If found, use the same vsi_list_id for
3179 * this new VLAN rule or else create a new list.
3181 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3185 status = ice_create_vsi_list_rule(hw,
3193 /* Convert the action to forwarding to a VSI list. */
3194 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3195 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3198 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* re-find to get the mgmt entry created by the call above */
3200 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3203 status = ICE_ERR_DOES_NOT_EXIST;
3206 /* reuse VSI list for new rule and increment ref_cnt */
3208 v_list_itr->vsi_list_info = map_info;
3209 map_info->ref_cnt++;
3211 v_list_itr->vsi_list_info =
3212 ice_create_vsi_list_map(hw, &vsi_handle,
3216 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3217 /* Update existing VSI list to add new VSI ID only if it used
3220 cur_fltr = &v_list_itr->fltr_info;
3221 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3224 /* If VLAN rule exists and VSI list being used by this rule is
3225 * referenced by more than 1 VLAN rule. Then create a new VSI
3226 * list appending previous VSI with new VSI and update existing
3227 * VLAN rule to point to new VSI list ID
3229 struct ice_fltr_info tmp_fltr;
3230 u16 vsi_handle_arr[2];
3233 /* Current implementation only supports reusing VSI list with
3234 * one VSI count. We should never hit below condition
3236 if (v_list_itr->vsi_count > 1 &&
3237 v_list_itr->vsi_list_info->ref_cnt > 1) {
3238 ice_debug(hw, ICE_DBG_SW,
3239 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3240 status = ICE_ERR_CFG;
/* single VSI currently on the shared list */
3245 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3248 /* A rule already exists with the new VSI being added */
3249 if (cur_handle == vsi_handle) {
3250 status = ICE_ERR_ALREADY_EXISTS;
3254 vsi_handle_arr[0] = cur_handle;
3255 vsi_handle_arr[1] = vsi_handle;
3256 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3257 &vsi_list_id, lkup_type);
3261 tmp_fltr = v_list_itr->fltr_info;
3262 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3263 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3264 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3265 /* Update the previous switch rule to a new VSI list which
3266 * includes current VSI that is requested
3268 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3272 /* before overriding VSI list map info. decrement ref_cnt of
3275 v_list_itr->vsi_list_info->ref_cnt--;
3277 /* now update to newly created list */
3278 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3279 v_list_itr->vsi_list_info =
3280 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3282 v_list_itr->vsi_count++;
3286 ice_release_lock(rule_lock);
3291 * ice_add_vlan - Add VLAN based filter rule
3292 * @hw: pointer to the hardware structure
3293 * @v_list: list of VLAN entries and forwarding information
/* Add every VLAN filter in v_list via ice_add_vlan_internal().  Each entry
 * must be of VLAN lookup type; per-entry status is recorded, and processing
 * stops at the first failure (that status is returned).
 */
3296 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3298 struct ice_fltr_list_entry *v_list_itr;
3301 return ICE_ERR_PARAM;
3303 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3305 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3306 return ICE_ERR_PARAM;
3307 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3308 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3309 if (v_list_itr->status)
3310 return v_list_itr->status;
3316 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3317 * @hw: pointer to the hardware structure
3318 * @mv_list: list of MAC and VLAN filters
3320 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3321 * pruning bits enabled, then it is the responsibility of the caller to make
3322 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3323 * VLAN won't be received on that VSI otherwise.
/* Add every MAC+VLAN pair filter in mv_list through the generic
 * ice_add_rule_internal() path on the MAC_VLAN recipe.  Entries are forced
 * to the Tx flag; stops and returns on the first per-entry failure.
 */
3326 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3328 struct ice_fltr_list_entry *mv_list_itr;
3330 if (!mv_list || !hw)
3331 return ICE_ERR_PARAM;
3333 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3335 enum ice_sw_lkup_type l_type =
3336 mv_list_itr->fltr_info.lkup_type;
3338 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3339 return ICE_ERR_PARAM;
3340 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3341 mv_list_itr->status =
3342 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3344 if (mv_list_itr->status)
3345 return mv_list_itr->status;
3351 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3352 * @hw: pointer to the hardware structure
3353 * @em_list: list of ether type MAC filter, MAC is optional
3355 * This function requires the caller to populate the entries in
3356 * the filter list with the necessary fields (including flags to
3357 * indicate Tx or Rx rules).
/* Add every ethertype / ethertype+MAC filter in em_list via
 * ice_add_rule_internal() on the entry's own lookup type.  Unlike the
 * MAC/VLAN adders, the caller must have set flags (Rx vs Tx) already.
 * Stops and returns on the first per-entry failure.
 */
3360 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3362 struct ice_fltr_list_entry *em_list_itr;
3364 if (!em_list || !hw)
3365 return ICE_ERR_PARAM;
3367 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3369 enum ice_sw_lkup_type l_type =
3370 em_list_itr->fltr_info.lkup_type;
3372 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3373 l_type != ICE_SW_LKUP_ETHERTYPE)
3374 return ICE_ERR_PARAM;
3376 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3378 if (em_list_itr->status)
3379 return em_list_itr->status;
3385 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3386 * @hw: pointer to the hardware structure
3387 * @em_list: list of ethertype or ethertype MAC entries
3390 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3392 struct ice_fltr_list_entry *em_list_itr, *tmp;
3394 if (!em_list || !hw)
3395 return ICE_ERR_PARAM;
3397 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3399 enum ice_sw_lkup_type l_type =
3400 em_list_itr->fltr_info.lkup_type;
3402 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3403 l_type != ICE_SW_LKUP_ETHERTYPE)
3404 return ICE_ERR_PARAM;
3406 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3408 if (em_list_itr->status)
3409 return em_list_itr->status;
3415 * ice_rem_sw_rule_info
3416 * @hw: pointer to the hardware structure
3417 * @rule_head: pointer to the switch list structure that we want to delete
3420 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3422 if (!LIST_EMPTY(rule_head)) {
3423 struct ice_fltr_mgmt_list_entry *entry;
3424 struct ice_fltr_mgmt_list_entry *tmp;
3426 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3427 ice_fltr_mgmt_list_entry, list_entry) {
3428 LIST_DEL(&entry->list_entry);
3429 ice_free(hw, entry);
3435 * ice_rem_adv_rule_info
3436 * @hw: pointer to the hardware structure
3437 * @rule_head: pointer to the switch list structure that we want to delete
3440 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3442 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3443 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3445 if (LIST_EMPTY(rule_head))
3448 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3449 ice_adv_fltr_mgmt_list_entry, list_entry) {
3450 LIST_DEL(&lst_itr->list_entry);
3451 ice_free(hw, lst_itr->lkups);
3452 ice_free(hw, lst_itr);
3457 * ice_rem_all_sw_rules_info
3458 * @hw: pointer to the hardware structure
3460 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3462 struct ice_switch_info *sw = hw->switch_info;
3465 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3466 struct LIST_HEAD_TYPE *rule_head;
3468 rule_head = &sw->recp_list[i].filt_rules;
3469 if (!sw->recp_list[i].adv_rule)
3470 ice_rem_sw_rule_info(hw, rule_head);
3472 ice_rem_adv_rule_info(hw, rule_head);
3477 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3478 * @pi: pointer to the port_info structure
3479 * @vsi_handle: VSI handle to set as default
3480 * @set: true to add the above mentioned switch rule, false to remove it
3481 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3483 * add filter rule to set/unset given VSI as default VSI for the switch
3484 * (represented by swid)
3487 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3490 struct ice_aqc_sw_rules_elem *s_rule;
3491 struct ice_fltr_info f_info;
3492 struct ice_hw *hw = pi->hw;
3493 enum ice_adminq_opc opcode;
3494 enum ice_status status;
3498 if (!ice_is_vsi_valid(hw, vsi_handle))
3499 return ICE_ERR_PARAM;
3500 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3502 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3503 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3504 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3506 return ICE_ERR_NO_MEMORY;
3508 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3510 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3511 f_info.flag = direction;
3512 f_info.fltr_act = ICE_FWD_TO_VSI;
3513 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3515 if (f_info.flag & ICE_FLTR_RX) {
3516 f_info.src = pi->lport;
3517 f_info.src_id = ICE_SRC_ID_LPORT;
3519 f_info.fltr_rule_id =
3520 pi->dflt_rx_vsi_rule_id;
3521 } else if (f_info.flag & ICE_FLTR_TX) {
3522 f_info.src_id = ICE_SRC_ID_VSI;
3523 f_info.src = hw_vsi_id;
3525 f_info.fltr_rule_id =
3526 pi->dflt_tx_vsi_rule_id;
3530 opcode = ice_aqc_opc_add_sw_rules;
3532 opcode = ice_aqc_opc_remove_sw_rules;
3534 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3536 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3537 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3540 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3542 if (f_info.flag & ICE_FLTR_TX) {
3543 pi->dflt_tx_vsi_num = hw_vsi_id;
3544 pi->dflt_tx_vsi_rule_id = index;
3545 } else if (f_info.flag & ICE_FLTR_RX) {
3546 pi->dflt_rx_vsi_num = hw_vsi_id;
3547 pi->dflt_rx_vsi_rule_id = index;
3550 if (f_info.flag & ICE_FLTR_TX) {
3551 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3552 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3553 } else if (f_info.flag & ICE_FLTR_RX) {
3554 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3555 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3560 ice_free(hw, s_rule);
3565 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3566 * @hw: pointer to the hardware structure
3567 * @recp_id: lookup type for which the specified rule needs to be searched
3568 * @f_info: rule information
3570 * Helper function to search for a unicast rule entry - this is to be used
3571 * to remove unicast MAC filter that is not shared with other VSIs on the
3574 * Returns pointer to entry storing the rule if found
3576 static struct ice_fltr_mgmt_list_entry *
3577 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3578 struct ice_fltr_info *f_info)
3580 struct ice_switch_info *sw = hw->switch_info;
3581 struct ice_fltr_mgmt_list_entry *list_itr;
3582 struct LIST_HEAD_TYPE *list_head;
3584 list_head = &sw->recp_list[recp_id].filt_rules;
3585 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3587 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3588 sizeof(f_info->l_data)) &&
3589 f_info->fwd_id.hw_vsi_id ==
3590 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3591 f_info->flag == list_itr->fltr_info.flag)
3598 * ice_remove_mac - remove a MAC address based filter rule
3599 * @hw: pointer to the hardware structure
3600 * @m_list: list of MAC addresses and forwarding information
3602 * This function removes either a MAC filter rule or a specific VSI from a
3603 * VSI list for a multicast MAC address.
3605 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3606 * ice_add_mac. Caller should be aware that this call will only work if all
3607 * the entries passed into m_list were added previously. It will not attempt to
3608 * do a partial remove of entries that were found.
3611 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3613 struct ice_fltr_list_entry *list_itr, *tmp;
3614 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3617 return ICE_ERR_PARAM;
3619 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3620 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3622 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3623 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3626 if (l_type != ICE_SW_LKUP_MAC)
3627 return ICE_ERR_PARAM;
3629 vsi_handle = list_itr->fltr_info.vsi_handle;
3630 if (!ice_is_vsi_valid(hw, vsi_handle))
3631 return ICE_ERR_PARAM;
3633 list_itr->fltr_info.fwd_id.hw_vsi_id =
3634 ice_get_hw_vsi_num(hw, vsi_handle);
3635 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3636 /* Don't remove the unicast address that belongs to
3637 * another VSI on the switch, since it is not being
3640 ice_acquire_lock(rule_lock);
3641 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3642 &list_itr->fltr_info)) {
3643 ice_release_lock(rule_lock);
3644 return ICE_ERR_DOES_NOT_EXIST;
3646 ice_release_lock(rule_lock);
3648 list_itr->status = ice_remove_rule_internal(hw,
3651 if (list_itr->status)
3652 return list_itr->status;
3658 * ice_remove_vlan - Remove VLAN based filter rule
3659 * @hw: pointer to the hardware structure
3660 * @v_list: list of VLAN entries and forwarding information
3663 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3665 struct ice_fltr_list_entry *v_list_itr, *tmp;
3668 return ICE_ERR_PARAM;
3670 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3672 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3674 if (l_type != ICE_SW_LKUP_VLAN)
3675 return ICE_ERR_PARAM;
3676 v_list_itr->status = ice_remove_rule_internal(hw,
3679 if (v_list_itr->status)
3680 return v_list_itr->status;
3686 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3687 * @hw: pointer to the hardware structure
3688 * @v_list: list of MAC VLAN entries and forwarding information
3691 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3693 struct ice_fltr_list_entry *v_list_itr, *tmp;
3696 return ICE_ERR_PARAM;
3698 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3700 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3702 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3703 return ICE_ERR_PARAM;
3704 v_list_itr->status =
3705 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3707 if (v_list_itr->status)
3708 return v_list_itr->status;
3714 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3715 * @fm_entry: filter entry to inspect
3716 * @vsi_handle: VSI handle to compare with filter info
3719 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3721 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3722 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3723 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3724 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3729 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3730 * @hw: pointer to the hardware structure
3731 * @vsi_handle: VSI handle to remove filters from
3732 * @vsi_list_head: pointer to the list to add entry to
3733 * @fi: pointer to fltr_info of filter entry to copy & add
3735 * Helper function, used when creating a list of filters to remove from
3736 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3737 * original filter entry, with the exception of fltr_info.fltr_act and
3738 * fltr_info.fwd_id fields. These are set such that later logic can
3739 * extract which VSI to remove the fltr from, and pass on that information.
3741 static enum ice_status
3742 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3743 struct LIST_HEAD_TYPE *vsi_list_head,
3744 struct ice_fltr_info *fi)
3746 struct ice_fltr_list_entry *tmp;
3748 /* this memory is freed up in the caller function
3749 * once filters for this VSI are removed
3751 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3753 return ICE_ERR_NO_MEMORY;
3755 tmp->fltr_info = *fi;
3757 /* Overwrite these fields to indicate which VSI to remove filter from,
3758 * so find and remove logic can extract the information from the
3759 * list entries. Note that original entries will still have proper
3762 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3763 tmp->fltr_info.vsi_handle = vsi_handle;
3764 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3766 LIST_ADD(&tmp->list_entry, vsi_list_head);
3772 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3773 * @hw: pointer to the hardware structure
3774 * @vsi_handle: VSI handle to remove filters from
3775 * @lkup_list_head: pointer to the list that has certain lookup type filters
3776 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3778 * Locates all filters in lkup_list_head that are used by the given VSI,
3779 * and adds COPIES of those entries to vsi_list_head (intended to be used
3780 * to remove the listed filters).
3781 * Note that this means all entries in vsi_list_head must be explicitly
3782 * deallocated by the caller when done with list.
3784 static enum ice_status
3785 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3786 struct LIST_HEAD_TYPE *lkup_list_head,
3787 struct LIST_HEAD_TYPE *vsi_list_head)
3789 struct ice_fltr_mgmt_list_entry *fm_entry;
3790 enum ice_status status = ICE_SUCCESS;
3792 /* check to make sure VSI ID is valid and within boundary */
3793 if (!ice_is_vsi_valid(hw, vsi_handle))
3794 return ICE_ERR_PARAM;
3796 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3797 ice_fltr_mgmt_list_entry, list_entry) {
3798 struct ice_fltr_info *fi;
3800 fi = &fm_entry->fltr_info;
3801 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3804 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3813 * ice_determine_promisc_mask
3814 * @fi: filter info to parse
3816 * Helper function to determine which ICE_PROMISC_ mask corresponds
3817 * to given filter into.
3819 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3821 u16 vid = fi->l_data.mac_vlan.vlan_id;
3822 u8 *macaddr = fi->l_data.mac.mac_addr;
3823 bool is_tx_fltr = false;
3824 u8 promisc_mask = 0;
3826 if (fi->flag == ICE_FLTR_TX)
3829 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3830 promisc_mask |= is_tx_fltr ?
3831 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3832 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3833 promisc_mask |= is_tx_fltr ?
3834 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3835 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3836 promisc_mask |= is_tx_fltr ?
3837 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3839 promisc_mask |= is_tx_fltr ?
3840 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3842 return promisc_mask;
3846 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3847 * @hw: pointer to the hardware structure
3848 * @vsi_handle: VSI handle to retrieve info from
3849 * @promisc_mask: pointer to mask to be filled in
3850 * @vid: VLAN ID of promisc VLAN VSI
3853 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3856 struct ice_switch_info *sw = hw->switch_info;
3857 struct ice_fltr_mgmt_list_entry *itr;
3858 struct LIST_HEAD_TYPE *rule_head;
3859 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3861 if (!ice_is_vsi_valid(hw, vsi_handle))
3862 return ICE_ERR_PARAM;
3866 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3867 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3869 ice_acquire_lock(rule_lock);
3870 LIST_FOR_EACH_ENTRY(itr, rule_head,
3871 ice_fltr_mgmt_list_entry, list_entry) {
3872 /* Continue if this filter doesn't apply to this VSI or the
3873 * VSI ID is not in the VSI map for this filter
3875 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3878 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3880 ice_release_lock(rule_lock);
3886 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3887 * @hw: pointer to the hardware structure
3888 * @vsi_handle: VSI handle to retrieve info from
3889 * @promisc_mask: pointer to mask to be filled in
3890 * @vid: VLAN ID of promisc VLAN VSI
3893 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3896 struct ice_switch_info *sw = hw->switch_info;
3897 struct ice_fltr_mgmt_list_entry *itr;
3898 struct LIST_HEAD_TYPE *rule_head;
3899 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3901 if (!ice_is_vsi_valid(hw, vsi_handle))
3902 return ICE_ERR_PARAM;
3906 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3907 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3909 ice_acquire_lock(rule_lock);
3910 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3912 /* Continue if this filter doesn't apply to this VSI or the
3913 * VSI ID is not in the VSI map for this filter
3915 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3918 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3920 ice_release_lock(rule_lock);
3926 * ice_remove_promisc - Remove promisc based filter rules
3927 * @hw: pointer to the hardware structure
3928 * @recp_id: recipe ID for which the rule needs to removed
3929 * @v_list: list of promisc entries
3931 static enum ice_status
3932 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3933 struct LIST_HEAD_TYPE *v_list)
3935 struct ice_fltr_list_entry *v_list_itr, *tmp;
3937 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3939 v_list_itr->status =
3940 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3941 if (v_list_itr->status)
3942 return v_list_itr->status;
3948 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3949 * @hw: pointer to the hardware structure
3950 * @vsi_handle: VSI handle to clear mode
3951 * @promisc_mask: mask of promiscuous config bits to clear
3952 * @vid: VLAN ID to clear VLAN promiscuous
3955 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3958 struct ice_switch_info *sw = hw->switch_info;
3959 struct ice_fltr_list_entry *fm_entry, *tmp;
3960 struct LIST_HEAD_TYPE remove_list_head;
3961 struct ice_fltr_mgmt_list_entry *itr;
3962 struct LIST_HEAD_TYPE *rule_head;
3963 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3964 enum ice_status status = ICE_SUCCESS;
3967 if (!ice_is_vsi_valid(hw, vsi_handle))
3968 return ICE_ERR_PARAM;
3970 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3971 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3973 recipe_id = ICE_SW_LKUP_PROMISC;
3975 rule_head = &sw->recp_list[recipe_id].filt_rules;
3976 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3978 INIT_LIST_HEAD(&remove_list_head);
3980 ice_acquire_lock(rule_lock);
3981 LIST_FOR_EACH_ENTRY(itr, rule_head,
3982 ice_fltr_mgmt_list_entry, list_entry) {
3983 struct ice_fltr_info *fltr_info;
3984 u8 fltr_promisc_mask = 0;
3986 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3988 fltr_info = &itr->fltr_info;
3990 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3991 vid != fltr_info->l_data.mac_vlan.vlan_id)
3994 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3996 /* Skip if filter is not completely specified by given mask */
3997 if (fltr_promisc_mask & ~promisc_mask)
4000 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4004 ice_release_lock(rule_lock);
4005 goto free_fltr_list;
4008 ice_release_lock(rule_lock);
4010 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4013 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4014 ice_fltr_list_entry, list_entry) {
4015 LIST_DEL(&fm_entry->list_entry);
4016 ice_free(hw, fm_entry);
4023 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4024 * @hw: pointer to the hardware structure
4025 * @vsi_handle: VSI handle to configure
4026 * @promisc_mask: mask of promiscuous config bits
4027 * @vid: VLAN ID to set VLAN promiscuous
4030 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4032 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4033 struct ice_fltr_list_entry f_list_entry;
4034 struct ice_fltr_info new_fltr;
4035 enum ice_status status = ICE_SUCCESS;
4041 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4043 if (!ice_is_vsi_valid(hw, vsi_handle))
4044 return ICE_ERR_PARAM;
4045 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4047 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4049 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4050 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4051 new_fltr.l_data.mac_vlan.vlan_id = vid;
4052 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4054 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4055 recipe_id = ICE_SW_LKUP_PROMISC;
4058 /* Separate filters must be set for each direction/packet type
4059 * combination, so we will loop over the mask value, store the
4060 * individual type, and clear it out in the input mask as it
4063 while (promisc_mask) {
4069 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4070 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4071 pkt_type = UCAST_FLTR;
4072 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4073 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4074 pkt_type = UCAST_FLTR;
4076 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4077 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4078 pkt_type = MCAST_FLTR;
4079 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4080 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4081 pkt_type = MCAST_FLTR;
4083 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4084 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4085 pkt_type = BCAST_FLTR;
4086 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4087 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4088 pkt_type = BCAST_FLTR;
4092 /* Check for VLAN promiscuous flag */
4093 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4094 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4095 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4096 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4100 /* Set filter DA based on packet type */
4101 mac_addr = new_fltr.l_data.mac.mac_addr;
4102 if (pkt_type == BCAST_FLTR) {
4103 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4104 } else if (pkt_type == MCAST_FLTR ||
4105 pkt_type == UCAST_FLTR) {
4106 /* Use the dummy ether header DA */
4107 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4108 ICE_NONDMA_TO_NONDMA);
4109 if (pkt_type == MCAST_FLTR)
4110 mac_addr[0] |= 0x1; /* Set multicast bit */
4113 /* Need to reset this to zero for all iterations */
4116 new_fltr.flag |= ICE_FLTR_TX;
4117 new_fltr.src = hw_vsi_id;
4119 new_fltr.flag |= ICE_FLTR_RX;
4120 new_fltr.src = hw->port_info->lport;
4123 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4124 new_fltr.vsi_handle = vsi_handle;
4125 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4126 f_list_entry.fltr_info = new_fltr;
4128 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4129 if (status != ICE_SUCCESS)
4130 goto set_promisc_exit;
4138 * ice_set_vlan_vsi_promisc
4139 * @hw: pointer to the hardware structure
4140 * @vsi_handle: VSI handle to configure
4141 * @promisc_mask: mask of promiscuous config bits
4142 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4144 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4147 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4148 bool rm_vlan_promisc)
4150 struct ice_switch_info *sw = hw->switch_info;
4151 struct ice_fltr_list_entry *list_itr, *tmp;
4152 struct LIST_HEAD_TYPE vsi_list_head;
4153 struct LIST_HEAD_TYPE *vlan_head;
4154 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4155 enum ice_status status;
4158 INIT_LIST_HEAD(&vsi_list_head);
4159 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4160 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4161 ice_acquire_lock(vlan_lock);
4162 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4164 ice_release_lock(vlan_lock);
4166 goto free_fltr_list;
4168 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4170 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4171 if (rm_vlan_promisc)
4172 status = ice_clear_vsi_promisc(hw, vsi_handle,
4173 promisc_mask, vlan_id);
4175 status = ice_set_vsi_promisc(hw, vsi_handle,
4176 promisc_mask, vlan_id);
4182 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4183 ice_fltr_list_entry, list_entry) {
4184 LIST_DEL(&list_itr->list_entry);
4185 ice_free(hw, list_itr);
4191 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4192 * @hw: pointer to the hardware structure
4193 * @vsi_handle: VSI handle to remove filters from
4194 * @lkup: switch rule filter lookup type
4197 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4198 enum ice_sw_lkup_type lkup)
4200 struct ice_switch_info *sw = hw->switch_info;
4201 struct ice_fltr_list_entry *fm_entry;
4202 struct LIST_HEAD_TYPE remove_list_head;
4203 struct LIST_HEAD_TYPE *rule_head;
4204 struct ice_fltr_list_entry *tmp;
4205 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4206 enum ice_status status;
4208 INIT_LIST_HEAD(&remove_list_head);
4209 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4210 rule_head = &sw->recp_list[lkup].filt_rules;
4211 ice_acquire_lock(rule_lock);
4212 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4214 ice_release_lock(rule_lock);
4219 case ICE_SW_LKUP_MAC:
4220 ice_remove_mac(hw, &remove_list_head);
4222 case ICE_SW_LKUP_VLAN:
4223 ice_remove_vlan(hw, &remove_list_head);
4225 case ICE_SW_LKUP_PROMISC:
4226 case ICE_SW_LKUP_PROMISC_VLAN:
4227 ice_remove_promisc(hw, lkup, &remove_list_head);
4229 case ICE_SW_LKUP_MAC_VLAN:
4230 ice_remove_mac_vlan(hw, &remove_list_head);
4232 case ICE_SW_LKUP_ETHERTYPE:
4233 case ICE_SW_LKUP_ETHERTYPE_MAC:
4234 ice_remove_eth_mac(hw, &remove_list_head);
4236 case ICE_SW_LKUP_DFLT:
4237 ice_debug(hw, ICE_DBG_SW,
4238 "Remove filters for this lookup type hasn't been implemented yet\n");
4240 case ICE_SW_LKUP_LAST:
4241 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4245 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4246 ice_fltr_list_entry, list_entry) {
4247 LIST_DEL(&fm_entry->list_entry);
4248 ice_free(hw, fm_entry);
4253 * ice_remove_vsi_fltr - Remove all filters for a VSI
4254 * @hw: pointer to the hardware structure
4255 * @vsi_handle: VSI handle to remove filters from
4257 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4259 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4261 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4262 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4263 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4264 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4265 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4266 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4267 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4268 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4272 * ice_alloc_res_cntr - allocating resource counter
4273 * @hw: pointer to the hardware structure
4274 * @type: type of resource
4275 * @alloc_shared: if set it is shared else dedicated
4276 * @num_items: number of entries requested for FD resource type
4277 * @counter_id: counter index returned by AQ call
4280 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4283 struct ice_aqc_alloc_free_res_elem *buf;
4284 enum ice_status status;
4287 /* Allocate resource */
4288 buf_len = sizeof(*buf);
4289 buf = (struct ice_aqc_alloc_free_res_elem *)
4290 ice_malloc(hw, buf_len);
4292 return ICE_ERR_NO_MEMORY;
4294 buf->num_elems = CPU_TO_LE16(num_items);
4295 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4296 ICE_AQC_RES_TYPE_M) | alloc_shared);
4298 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4299 ice_aqc_opc_alloc_res, NULL);
4303 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4311 * ice_free_res_cntr - free resource counter
4312 * @hw: pointer to the hardware structure
4313 * @type: type of resource
4314 * @alloc_shared: if set it is shared else dedicated
4315 * @num_items: number of entries to be freed for FD resource type
4316 * @counter_id: counter ID resource which needs to be freed
4319 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4322 struct ice_aqc_alloc_free_res_elem *buf;
4323 enum ice_status status;
4327 buf_len = sizeof(*buf);
4328 buf = (struct ice_aqc_alloc_free_res_elem *)
4329 ice_malloc(hw, buf_len);
4331 return ICE_ERR_NO_MEMORY;
4333 buf->num_elems = CPU_TO_LE16(num_items);
4334 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4335 ICE_AQC_RES_TYPE_M) | alloc_shared);
4336 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4338 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4339 ice_aqc_opc_free_res, NULL);
4341 ice_debug(hw, ICE_DBG_SW,
4342 "counter resource could not be freed\n");
4349 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4350 * @hw: pointer to the hardware structure
4351 * @counter_id: returns counter index
4353 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4355 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4356 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4361 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4362 * @hw: pointer to the hardware structure
4363 * @counter_id: counter index to be freed
4365 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4367 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4368 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4373 * ice_alloc_res_lg_act - add large action resource
4374 * @hw: pointer to the hardware structure
4375 * @l_id: large action ID to fill it in
4376 * @num_acts: number of actions to hold with a large action entry
4378 static enum ice_status
4379 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4381 struct ice_aqc_alloc_free_res_elem *sw_buf;
4382 enum ice_status status;
4385 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4386 return ICE_ERR_PARAM;
4388 /* Allocate resource for large action */
4389 buf_len = sizeof(*sw_buf);
4390 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4391 ice_malloc(hw, buf_len);
4393 return ICE_ERR_NO_MEMORY;
4395 sw_buf->num_elems = CPU_TO_LE16(1);
4397 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4398 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4399 * If num_acts is greater than 2, then use
4400 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4401 * The num_acts cannot exceed 4. This was ensured at the
4402 * beginning of the function.
4405 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4406 else if (num_acts == 2)
4407 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4409 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4411 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4412 ice_aqc_opc_alloc_res, NULL);
4414 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4416 ice_free(hw, sw_buf);
4421 * ice_add_mac_with_sw_marker - add filter with sw marker
4422 * @hw: pointer to the hardware structure
4423 * @f_info: filter info structure containing the MAC filter information
4424 * @sw_marker: sw marker to tag the Rx descriptor with
4427 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4430 struct ice_switch_info *sw = hw->switch_info;
4431 struct ice_fltr_mgmt_list_entry *m_entry;
4432 struct ice_fltr_list_entry fl_info;
4433 struct LIST_HEAD_TYPE l_head;
4434 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4435 enum ice_status ret;
4439 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4440 return ICE_ERR_PARAM;
4442 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4443 return ICE_ERR_PARAM;
4445 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4446 return ICE_ERR_PARAM;
4448 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4449 return ICE_ERR_PARAM;
4450 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4452 /* Add filter if it doesn't exist so then the adding of large
4453 * action always results in update
4456 INIT_LIST_HEAD(&l_head);
4457 fl_info.fltr_info = *f_info;
4458 LIST_ADD(&fl_info.list_entry, &l_head);
4460 entry_exists = false;
4461 ret = ice_add_mac(hw, &l_head);
4462 if (ret == ICE_ERR_ALREADY_EXISTS)
4463 entry_exists = true;
4467 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4468 ice_acquire_lock(rule_lock);
4469 /* Get the book keeping entry for the filter */
4470 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4474 /* If counter action was enabled for this rule then don't enable
4475 * sw marker large action
4477 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4478 ret = ICE_ERR_PARAM;
4482 /* if same marker was added before */
4483 if (m_entry->sw_marker_id == sw_marker) {
4484 ret = ICE_ERR_ALREADY_EXISTS;
4488 /* Allocate a hardware table entry to hold large act. Three actions
4489 * for marker based large action
4491 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4495 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4498 /* Update the switch rule to add the marker action */
4499 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4501 ice_release_lock(rule_lock);
4506 ice_release_lock(rule_lock);
4507 /* only remove entry if it did not exist previously */
4509 ret = ice_remove_mac(hw, &l_head);
4515 * ice_add_mac_with_counter - add filter with counter enabled
4516 * @hw: pointer to the hardware structure
4517 * @f_info: pointer to filter info structure containing the MAC filter
 *
 * Adds the MAC filter (reusing an existing one if ice_add_mac() reports
 * ICE_ERR_ALREADY_EXISTS), then, under the MAC recipe's filt_rule_lock,
 * allocates a VLAN counter plus a two-action large action and attaches the
 * counter action to the rule via ice_add_counter_act(). Only ICE_FWD_TO_VSI
 * MAC filters on a valid VSI handle are accepted.
4521 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4523 struct ice_switch_info *sw = hw->switch_info;
4524 struct ice_fltr_mgmt_list_entry *m_entry;
4525 struct ice_fltr_list_entry fl_info;
4526 struct LIST_HEAD_TYPE l_head;
4527 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4528 enum ice_status ret;
 /* Validate the request before touching HW state. */
4533 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4534 return ICE_ERR_PARAM;
4536 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4537 return ICE_ERR_PARAM;
4539 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4540 return ICE_ERR_PARAM;
4541 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
 /* Remember whether the rule pre-existed so the error path below only
  * removes rules this call created.
  */
4543 entry_exist = false;
4545 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4547 /* Add filter if it doesn't exist so then the adding of large
4548 * action always results in update
4550 INIT_LIST_HEAD(&l_head);
4552 fl_info.fltr_info = *f_info;
4553 LIST_ADD(&fl_info.list_entry, &l_head);
4555 ret = ice_add_mac(hw, &l_head);
4556 if (ret == ICE_ERR_ALREADY_EXISTS)
 /* Look up the bookkeeping entry for the (now guaranteed) rule. */
4561 ice_acquire_lock(rule_lock);
4562 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4564 ret = ICE_ERR_BAD_PTR;
4568 /* Don't enable counter for a filter for which sw marker was enabled */
4569 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4570 ret = ICE_ERR_PARAM;
4574 /* If a counter was already enabled then don't need to add again */
4575 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4576 ret = ICE_ERR_ALREADY_EXISTS;
4580 /* Allocate a hardware table entry to VLAN counter */
4581 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4585 /* Allocate a hardware table entry to hold large act. Two actions for
4586 * counter based large action
4588 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4592 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4595 /* Update the switch rule to add the counter action */
4596 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4598 ice_release_lock(rule_lock);
 /* Error/cleanup path: drop the lock before removing the MAC rule,
  * since ice_remove_mac() takes the same lock internally — TODO confirm
  * against full source.
  */
4603 ice_release_lock(rule_lock);
4604 /* only remove entry if it did not exist previously */
4606 ret = ice_remove_mac(hw, &l_head);
4611 /* This is mapping table entry that maps every word within a given protocol
4612 * structure to the real byte offset as per the specification of that
 * protocol header;
 * for example dst address is 3 words in the MAC header and the corresponding
 * bytes are 0, 2, 4 in the actual packet header, and src address is at 6, 8, 10
 * (each entry below lists the byte offset of each 16-bit match word).
4616 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4617 * matching entry describing its field. This needs to be updated if new
4618 * structure is added to that union.
4620 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4621 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4622 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4623 { ICE_ETYPE_OL, { 0 } },
4624 { ICE_VLAN_OFOS, { 0, 2 } },
4625 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4626 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4627 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4628 26, 28, 30, 32, 34, 36, 38 } },
4629 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4630 26, 28, 30, 32, 34, 36, 38 } },
4631 { ICE_TCP_IL, { 0, 2 } },
4632 { ICE_UDP_OF, { 0, 2 } },
4633 { ICE_UDP_ILOS, { 0, 2 } },
4634 { ICE_SCTP_IL, { 0, 2 } },
 /* Tunnel headers: offsets start at 8, past the preceding UDP header. */
4635 { ICE_VXLAN, { 8, 10, 12, 14 } },
4636 { ICE_GENEVE, { 8, 10, 12, 14 } },
4637 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4638 { ICE_NVGRE, { 0, 2, 4, 6 } },
4639 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4640 { ICE_PPPOE, { 0, 2, 4, 6 } },
4643 /* The following table describes preferred grouping of recipes.
4644 * If a recipe that needs to be programmed is a superset or matches one of the
4645 * following combinations, then the recipe needs to be chained as per the
 * preferred grouping.
 * Each entry maps a software protocol type to the hardware protocol ID used
 * in field-vector extraction. Note that all UDP-based tunnels (VXLAN, GENEVE,
 * VXLAN-GPE, GTP) intentionally share ICE_UDP_OF_HW.
4649 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4650 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4651 { ICE_MAC_IL, ICE_MAC_IL_HW },
4652 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4653 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4654 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4655 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4656 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4657 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4658 { ICE_TCP_IL, ICE_TCP_IL_HW },
4659 { ICE_UDP_OF, ICE_UDP_OF_HW },
4660 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4661 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4662 { ICE_VXLAN, ICE_UDP_OF_HW },
4663 { ICE_GENEVE, ICE_UDP_OF_HW },
4664 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4665 { ICE_NVGRE, ICE_GRE_OF_HW },
4666 { ICE_GTP, ICE_UDP_OF_HW },
4667 { ICE_PPPOE, ICE_PPPOE_HW },
4671 * ice_find_recp - find a recipe
4672 * @hw: pointer to the hardware structure
4673 * @lkup_exts: extension sequence to match
 *
 * Matching is order-insensitive: a recipe matches when it has the same number
 * of valid words and every (prot_id, off) pair of @lkup_exts appears somewhere
 * in the recipe's own word list.
4675 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4677 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4679 bool refresh_required = true;
4680 struct ice_sw_recipe *recp;
4683 /* Walk through existing recipes to find a match */
4684 recp = hw->switch_info->recp_list;
4685 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4686 /* If recipe was not created for this ID, in SW bookkeeping,
4687 * check if FW has an entry for this recipe. If the FW has an
4688 * entry update it in our SW bookkeeping and continue with the
 * matching.
4691 if (!recp[i].recp_created)
4692 if (ice_get_recp_frm_fw(hw,
4693 hw->switch_info->recp_list, i,
4697 /* Skip inverse action recipes */
4698 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4699 ICE_AQ_RECIPE_ACT_INV_ACT)
4702 /* if number of words we are looking for match */
4703 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4704 struct ice_fv_word *a = lkup_exts->fv_words;
4705 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
4709 for (p = 0; p < lkup_exts->n_val_words; p++) {
4710 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4712 if (a[p].off == b[q].off &&
4713 a[p].prot_id == b[q].prot_id)
4714 /* Found the "p"th word in the
 * recipe; move to the next word.
4719 /* After walking through all the words in the
4720 * "i"th recipe if "p"th word was not found then
4721 * this recipe is not what we are looking for.
4722 * So break out from this loop and try the next
 * recipe.
4725 if (q >= recp[i].lkup_exts.n_val_words) {
4730 /* If for "i"th recipe the found was never set to false
4731 * then it means we found our match
4734 return i; /* Return the recipe ID */
 /* No existing recipe covers this extraction sequence. */
4737 return ICE_MAX_NUM_RECIPES;
4741 * ice_prot_type_to_id - get protocol ID from protocol type
4742 * @type: protocol type
4743 * @id: pointer to variable that will receive the ID
 *
 * Linear scan of ice_prot_id_tbl terminated by ICE_PROTOCOL_LAST;
 * on a miss *id is left untouched.
4745 * Returns true if found, false otherwise
4747 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4751 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4752 if (ice_prot_id_tbl[i].type == type) {
4753 *id = ice_prot_id_tbl[i].protocol_id;
 * ice_fill_valid_words - fill in and count valid lookup words
4761 * @rule: advanced rule with lookup information
4762 * @lkup_exts: byte offset extractions of the words that are valid
 *
4764 * calculate valid words in a lookup rule using mask value
 * Each 16-bit word of the rule's mask union that is non-zero becomes one
 * entry in @lkup_exts (protocol ID, byte offset, and mask); returns the
 * number of words appended.
4767 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4768 struct ice_prot_lkup_ext *lkup_exts)
 /* Unknown protocol type: nothing to extract. */
4774 if (!ice_prot_type_to_id(rule->type, &prot_id))
 /* Append after any words recorded by earlier rules. */
4777 word = lkup_exts->n_val_words;
4779 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4780 if (((u16 *)&rule->m_u)[j] &&
4781 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4782 /* No more space to accommodate */
4783 if (word >= ICE_MAX_CHAIN_WORDS)
4785 lkup_exts->fv_words[word].off =
4786 ice_prot_ext[rule->type].offs[j];
4787 lkup_exts->fv_words[word].prot_id =
4788 ice_prot_id_tbl[rule->type].protocol_id;
4789 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
 /* Return only the count contributed by this rule. */
4793 ret_val = word - lkup_exts->n_val_words;
4794 lkup_exts->n_val_words = word;
4800 * ice_create_first_fit_recp_def - Create a recipe grouping
4801 * @hw: pointer to the hardware structure
4802 * @lkup_exts: an array of protocol header extractions
4803 * @rg_list: pointer to a list that stores new recipe groups
4804 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
4806 * Using first fit algorithm, take all the words that are still not done
4807 * and start grouping them in 4-word groups. Each group makes up one
 * recipe. Entries are allocated with ice_malloc() and linked onto @rg_list;
 * ownership transfers to the caller.
4810 static enum ice_status
4811 ice_create_first_fit_recp_def(struct ice_hw *hw,
4812 struct ice_prot_lkup_ext *lkup_exts,
4813 struct LIST_HEAD_TYPE *rg_list,
4816 struct ice_pref_recipe_group *grp = NULL;
4821 /* Walk through every word in the rule to check if it is not done. If so
4822 * then this word needs to be part of a new recipe.
4824 for (j = 0; j < lkup_exts->n_val_words; j++)
4825 if (!ice_is_bit_set(lkup_exts->done, j)) {
 /* Start a new group when none exists yet or the current
  * one is full (ICE_NUM_WORDS_RECIPE pairs).
  */
4827 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4828 struct ice_recp_grp_entry *entry;
4830 entry = (struct ice_recp_grp_entry *)
4831 ice_malloc(hw, sizeof(*entry));
4833 return ICE_ERR_NO_MEMORY;
4834 LIST_ADD(&entry->l_entry, rg_list);
4835 grp = &entry->r_group;
 /* Copy this word's protocol/offset pair and mask into the
  * current group.
  */
4839 grp->pairs[grp->n_val_pairs].prot_id =
4840 lkup_exts->fv_words[j].prot_id;
4841 grp->pairs[grp->n_val_pairs].off =
4842 lkup_exts->fv_words[j].off;
4843 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4851 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4852 * @hw: pointer to the hardware structure
4853 * @fv_list: field vector with the extraction sequence information
4854 * @rg_list: recipe groupings with protocol-offset pairs
 *
4856 * Helper function to fill in the field vector indices for protocol-offset
4857 * pairs. These indexes are then ultimately programmed into a recipe.
 * Only the FIRST field vector on @fv_list is consulted; returns
 * ICE_ERR_PARAM if any requested protocol/offset pair is absent from it.
4859 static enum ice_status
4860 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4861 struct LIST_HEAD_TYPE *rg_list)
4863 struct ice_sw_fv_list_entry *fv;
4864 struct ice_recp_grp_entry *rg;
4865 struct ice_fv_word *fv_ext;
4867 if (LIST_EMPTY(fv_list))
4870 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4871 fv_ext = fv->fv_ptr->ew;
4873 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4876 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4877 struct ice_fv_word *pr;
4882 pr = &rg->r_group.pairs[i];
4883 mask = rg->r_group.mask[i];
 /* Scan the extraction words of the field vector for a
  * matching protocol/offset pair.
  */
4885 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4886 if (fv_ext[j].prot_id == pr->prot_id &&
4887 fv_ext[j].off == pr->off) {
4890 /* Store index of field vector */
4892 /* Mask is given by caller as big
4893 * endian, but sent to FW as little
 * endian, so byte-swap it here.
4896 rg->fv_mask[i] = mask << 8 | mask >> 8;
4900 /* Protocol/offset could not be found, caller gave an
 * invalid pair.
4904 return ICE_ERR_PARAM;
4912 * ice_find_free_recp_res_idx - find free result indexes for recipe
4913 * @hw: pointer to hardware structure
4914 * @profiles: bitmap of profiles that will be associated with the new recipe
4915 * @free_idx: pointer to variable to receive the free index bitmap
 *
4917 * The algorithm used here is:
4918 * 1. When creating a new recipe, create a set P which contains all
4919 * Profiles that will be associated with our new recipe
4921 * 2. For each Profile p in set P:
4922 * a. Add all recipes associated with Profile p into set R
4923 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4924 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4925 * i. Or just assume they all have the same possible indexes:
4927 * i.e., PossibleIndexes = 0x0000F00000000000
4929 * 3. For each Recipe r in set R:
4930 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4931 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
4933 * FreeIndexes will contain the bits indicating the indexes free for use,
4934 * then the code needs to update the recipe[r].used_result_idx_bits to
4935 * indicate which indexes were selected for use by this recipe.
 * Returns the number of free indexes (popcount of *free_idx).
4938 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4939 ice_bitmap_t *free_idx)
4941 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4942 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4943 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4947 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4948 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4949 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4950 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
 /* Start with every result index considered possible. */
4952 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
4953 ice_set_bit(count, possible_idx);
4955 /* For each profile we are going to associate the recipe with, add the
4956 * recipes that are associated with that profile. This will give us
4957 * the set of recipes that our recipe may collide with. Also, determine
4958 * what possible result indexes are usable given this set of profiles.
4961 while (ICE_MAX_NUM_PROFILES >
4962 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4963 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4964 ICE_MAX_NUM_RECIPES);
4965 ice_and_bitmap(possible_idx, possible_idx,
4966 hw->switch_info->prof_res_bm[bit],
4971 /* For each recipe that our new recipe may collide with, determine
4972 * which indexes have been used.
4974 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4975 if (ice_is_bit_set(recipes, bit)) {
4976 ice_or_bitmap(used_idx, used_idx,
4977 hw->switch_info->recp_list[bit].res_idxs,
 /* free = possible XOR used (used is a subset of possible). */
4981 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4983 /* return number of free indexes */
4986 while (ICE_MAX_FV_WORDS >
4987 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
4996 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4997 * @hw: pointer to hardware structure
4998 * @rm: recipe management list entry
4999 * @match_tun: if field vector index for tunnel needs to be programmed
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe IDs for every group on rm->rg_list, builds the AQ recipe
 * buffers (lookup indices, masks, result index for chaining), creates a
 * chaining "root" recipe when more than one group is needed, programs the
 * recipes through ice_aq_add_recipe() under the change lock, and finally
 * mirrors everything into the software recp_list bookkeeping.
5002 static enum ice_status
5003 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5004 bool match_tun, ice_bitmap_t *profiles)
5006 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5007 struct ice_aqc_recipe_data_elem *tmp;
5008 struct ice_aqc_recipe_data_elem *buf;
5009 struct ice_recp_grp_entry *entry;
5010 enum ice_status status;
5016 /* When more than one recipe are required, another recipe is needed to
5017 * chain them together. Matching a tunnel metadata ID takes up one of
5018 * the match fields in the chaining recipe reducing the number of
5019 * chained recipes by one.
5021 /* check number of free result indices */
5022 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5023 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm)
5025 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5026 free_res_idx, rm->n_grp_count);
5028 if (rm->n_grp_count > 1) {
5029 if (rm->n_grp_count > free_res_idx)
5030 return ICE_ERR_MAX_LIMIT;
 /* tmp holds FW's current recipe image used as a template;
  * buf holds the recipes to be programmed.
  */
5035 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5036 ICE_MAX_NUM_RECIPES,
5039 return ICE_ERR_NO_MEMORY;
5041 buf = (struct ice_aqc_recipe_data_elem *)
5042 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5044 status = ICE_ERR_NO_MEMORY;
5048 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5049 recipe_count = ICE_MAX_NUM_RECIPES;
5050 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5052 if (status || recipe_count == 0)
5055 /* Allocate the recipe resources, and configure them according to the
5056 * match fields from protocol headers and extracted field vectors.
5058 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5059 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5062 status = ice_alloc_recipe(hw, &entry->rid);
5066 /* Clear the result index of the located recipe, as this will be
5067 * updated, if needed, later in the recipe creation process.
5069 tmp[0].content.result_indx = 0;
5071 buf[recps] = tmp[0];
5072 buf[recps].recipe_indx = (u8)entry->rid;
5073 /* if the recipe is a non-root recipe RID should be programmed
5074 * as 0 for the rules to be applied correctly.
5076 buf[recps].content.rid = 0;
5077 ice_memset(&buf[recps].content.lkup_indx, 0,
5078 sizeof(buf[recps].content.lkup_indx),
5081 /* All recipes use look-up index 0 to match switch ID. */
5082 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5083 buf[recps].content.mask[0] =
5084 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5085 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
 * to zero; real pairs are filled in just below.
5088 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5089 buf[recps].content.lkup_indx[i] = 0x80;
5090 buf[recps].content.mask[i] = 0;
 /* Program this group's field-vector indices and byte-swapped
  * masks (see ice_fill_fv_word_index) at lookup slots 1..n.
  */
5093 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5094 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5095 buf[recps].content.mask[i + 1] =
5096 CPU_TO_LE16(entry->fv_mask[i]);
5099 if (rm->n_grp_count > 1) {
5100 /* Checks to see if there really is a valid result index
 * that can be used for chaining.
5103 if (chain_idx >= ICE_MAX_FV_WORDS) {
5104 ice_debug(hw, ICE_DBG_SW,
5105 "No chain index available\n");
5106 status = ICE_ERR_MAX_LIMIT;
 /* Consume one free result index for this recipe's chain
  * output and advance to the next free one.
  */
5110 entry->chain_idx = chain_idx;
5111 buf[recps].content.result_indx =
5112 ICE_AQ_RECIPE_RESULT_EN |
5113 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5114 ICE_AQ_RECIPE_RESULT_DATA_M);
5115 ice_clear_bit(chain_idx, result_idx_bm);
5116 chain_idx = ice_find_first_bit(result_idx_bm,
5120 /* fill recipe dependencies */
5121 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5122 ICE_MAX_NUM_RECIPES);
5123 ice_set_bit(buf[recps].recipe_indx,
5124 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5125 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5129 if (rm->n_grp_count == 1) {
 /* Single-group case: that one recipe is the root. */
5130 rm->root_rid = buf[0].recipe_indx;
5131 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5132 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5133 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5134 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5135 sizeof(buf[0].recipe_bitmap),
5136 ICE_NONDMA_TO_NONDMA);
5138 status = ICE_ERR_BAD_PTR;
5141 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5142 * the recipe which is getting created if specified
5143 * by user. Usually any advanced switch filter, which results
5144 * into new extraction sequence, ended up creating a new recipe
5145 * of type ROOT and usually recipes are associated with profiles
 * Switch rule referring newly created recipe, needs to have
5147 * either/or 'fwd' or 'join' priority, otherwise switch rule
5148 * evaluation will not happen correctly. In other words, if
5149 * switch rule to be evaluated on priority basis, then recipe
5150 * needs to have priority, otherwise it will be evaluated last.
5152 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5154 struct ice_recp_grp_entry *last_chain_entry;
5157 /* Allocate the last recipe that will chain the outcomes of the
5158 * other recipes together
5160 status = ice_alloc_recipe(hw, &rid);
5164 buf[recps].recipe_indx = (u8)rid;
5165 buf[recps].content.rid = (u8)rid;
5166 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5167 /* the new entry created should also be part of rg_list to
5168 * make sure we have complete recipe
5170 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5171 sizeof(*last_chain_entry));
5172 if (!last_chain_entry) {
5173 status = ICE_ERR_NO_MEMORY;
5176 last_chain_entry->rid = rid;
5177 ice_memset(&buf[recps].content.lkup_indx, 0,
5178 sizeof(buf[recps].content.lkup_indx),
5180 /* All recipes use look-up index 0 to match switch ID. */
5181 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5182 buf[recps].content.mask[0] =
5183 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5184 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5185 buf[recps].content.lkup_indx[i] =
5186 ICE_AQ_RECIPE_LKUP_IGNORE;
5187 buf[recps].content.mask[i] = 0;
5191 /* update r_bitmap with the recp that is used for chaining */
5192 ice_set_bit(rid, rm->r_bitmap);
5193 /* this is the recipe that chains all the other recipes so it
5194 * should not have a chaining ID to indicate the same
5196 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
 /* The chaining recipe matches on the result (chain) index each
  * sub-recipe produced.
  */
5197 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5199 last_chain_entry->fv_idx[i] = entry->chain_idx;
5200 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5201 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5202 ice_set_bit(entry->rid, rm->r_bitmap);
5204 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5205 if (sizeof(buf[recps].recipe_bitmap) >=
5206 sizeof(rm->r_bitmap)) {
5207 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5208 sizeof(buf[recps].recipe_bitmap),
5209 ICE_NONDMA_TO_NONDMA);
5211 status = ICE_ERR_BAD_PTR;
5214 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5216 /* To differentiate among different UDP tunnels, a meta data ID
 * flag is matched in the chaining recipe (match_tun case).
5220 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5221 buf[recps].content.mask[i] =
5222 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5226 rm->root_rid = (u8)rid;
 /* Program all prepared recipes in one AQ call under the change lock. */
5228 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5232 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5233 ice_release_change_lock(hw);
5237 /* Every recipe that just got created add it to the recipe
 * book keeping list (sw->recp_list).
5240 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5241 struct ice_switch_info *sw = hw->switch_info;
5242 bool is_root, idx_found = false;
5243 struct ice_sw_recipe *recp;
5244 u16 idx, buf_idx = 0;
5246 /* find buffer index for copying some data */
5247 for (idx = 0; idx < rm->n_grp_count; idx++)
5248 if (buf[idx].recipe_indx == entry->rid) {
5254 status = ICE_ERR_OUT_OF_RANGE;
5258 recp = &sw->recp_list[entry->rid];
5259 is_root = (rm->root_rid == entry->rid);
5260 recp->is_root = is_root;
5262 recp->root_rid = entry->rid;
5263 recp->big_recp = (is_root && rm->n_grp_count > 1);
5265 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5266 entry->r_group.n_val_pairs *
5267 sizeof(struct ice_fv_word),
5268 ICE_NONDMA_TO_NONDMA);
5270 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5271 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5273 /* Copy non-result fv index values and masks to recipe. This
5274 * call will also update the result recipe bitmask.
5276 ice_collect_result_idx(&buf[buf_idx], recp);
5278 /* for non-root recipes, also copy to the root, this allows
5279 * easier matching of a complete chained recipe
5282 ice_collect_result_idx(&buf[buf_idx],
5283 &sw->recp_list[rm->root_rid]);
5285 recp->n_ext_words = entry->r_group.n_val_pairs;
5286 recp->chain_idx = entry->chain_idx;
5287 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5288 recp->n_grp_count = rm->n_grp_count;
5289 recp->tun_type = rm->tun_type;
5290 recp->recp_created = true;
5305 * ice_create_recipe_group - creates recipe group
5306 * @hw: pointer to hardware structure
5307 * @rm: recipe management list entry
5308 * @lkup_exts: lookup elements
 *
 * Partitions the valid lookup words into first-fit recipe groups (see
 * ice_create_first_fit_recp_def) and caches the word list and masks in @rm.
5310 static enum ice_status
5311 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5312 struct ice_prot_lkup_ext *lkup_exts)
5314 enum ice_status status;
5317 rm->n_grp_count = 0;
5319 /* Create recipes for words that are marked not done by packing them
 * in 4-word groups.
5322 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5323 &rm->rg_list, &recp_count);
5325 rm->n_grp_count += recp_count;
5326 rm->n_ext_words = lkup_exts->n_val_words;
5327 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5328 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5329 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5330 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5337 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5338 * @hw: pointer to hardware structure
5339 * @lkups: lookup elements or match criteria for the advanced recipe, one
5340 * structure per protocol header
5341 * @lkups_cnt: number of protocols
5342 * @bm: bitmap of field vectors to consider
5343 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup's protocol type to a HW protocol ID and asks
 * ice_get_sw_fv_list() for field vectors containing all of them; fails with
 * ICE_ERR_CFG on an unknown protocol type. The temporary prot_ids array is
 * freed on all paths.
5345 static enum ice_status
5346 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5347 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5349 enum ice_status status;
5353 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5355 return ICE_ERR_NO_MEMORY;
5357 for (i = 0; i < lkups_cnt; i++)
5358 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5359 status = ICE_ERR_CFG;
5363 /* Find field vectors that include all specified protocol types */
5364 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5367 ice_free(hw, prot_ids);
5372 * ice_add_special_words - Add words that are not protocols, such as metadata
5373 * @rinfo: other information regarding the rule e.g. priority and action info
5374 * @lkup_exts: lookup word structure
 *
 * For tunneled rules, appends one extra lookup word matching the tunnel flag
 * in the packet metadata (ICE_META_DATA_ID_HW). Returns ICE_ERR_MAX_LIMIT if
 * @lkup_exts already holds ICE_MAX_CHAIN_WORDS words.
5376 static enum ice_status
5377 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5378 struct ice_prot_lkup_ext *lkup_exts)
5380 /* If this is a tunneled packet, then add recipe index to match the
5381 * tunnel bit in the packet metadata flags.
5383 if (rinfo->tun_type != ICE_NON_TUN) {
5384 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5385 u8 word = lkup_exts->n_val_words++;
5387 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5388 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
 /* offset scaled by metadata word size */
5390 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5392 return ICE_ERR_MAX_LIMIT;
5399 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5400 * @hw: pointer to hardware structure
5401 * @rinfo: other information regarding the rule e.g. priority and action info
5402 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Maps the rule's tunnel type to a profile class and delegates to
 * ice_get_sw_fv_bitmap() to fill @bm with the compatible field vectors.
5405 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5408 enum ice_prof_type type;
5410 switch (rinfo->tun_type) {
5412 type = ICE_PROF_NON_TUN;
5414 case ICE_ALL_TUNNELS:
5415 type = ICE_PROF_TUN_ALL;
 /* All UDP-encapsulated tunnels share the UDP tunnel profile. */
5417 case ICE_SW_TUN_VXLAN_GPE:
5418 case ICE_SW_TUN_GENEVE:
5419 case ICE_SW_TUN_VXLAN:
5420 case ICE_SW_TUN_UDP:
5421 case ICE_SW_TUN_GTP:
5422 type = ICE_PROF_TUN_UDP;
5424 case ICE_SW_TUN_NVGRE:
5425 type = ICE_PROF_TUN_GRE;
5427 case ICE_SW_TUN_PPPOE:
5428 type = ICE_PROF_TUN_PPPOE;
5430 case ICE_SW_TUN_AND_NON_TUN:
 /* default/combined case: consider every profile */
5432 type = ICE_PROF_ALL;
5436 ice_get_sw_fv_bitmap(hw, type, bm);
5440 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * recipe list.
5441 * @hw: pointer to hardware structure
5442 * @lkups: lookup elements or match criteria for the advanced recipe, one
5443 * structure per protocol header
5444 * @lkups_cnt: number of protocols
5445 * @rinfo: other information regarding the rule e.g. priority and action info
5446 * @rid: return the recipe ID of the recipe created
 *
 * Top-level recipe creation flow: validate and extract match words, find the
 * compatible field vectors, group the words into recipes, reuse an existing
 * recipe when ice_find_recp() locates one, otherwise program a new recipe
 * via ice_add_sw_recipe() and associate it with every matching profile.
5448 static enum ice_status
5449 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5450 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5452 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5453 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5454 struct ice_prot_lkup_ext *lkup_exts;
5455 struct ice_recp_grp_entry *r_entry;
5456 struct ice_sw_fv_list_entry *fvit;
5457 struct ice_recp_grp_entry *r_tmp;
5458 struct ice_sw_fv_list_entry *tmp;
5459 enum ice_status status = ICE_SUCCESS;
5460 struct ice_sw_recipe *rm;
5461 bool match_tun = false;
5465 return ICE_ERR_PARAM;
5467 lkup_exts = (struct ice_prot_lkup_ext *)
5468 ice_malloc(hw, sizeof(*lkup_exts));
5470 return ICE_ERR_NO_MEMORY;
5472 /* Determine the number of words to be matched and if it exceeds a
5473 * recipe's restrictions
5475 for (i = 0; i < lkups_cnt; i++) {
5478 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5479 status = ICE_ERR_CFG;
5480 goto err_free_lkup_exts;
5483 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5485 status = ICE_ERR_CFG;
5486 goto err_free_lkup_exts;
5490 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5492 status = ICE_ERR_NO_MEMORY;
5493 goto err_free_lkup_exts;
5496 /* Get field vectors that contain fields extracted from all the protocol
5497 * headers being programmed.
5499 INIT_LIST_HEAD(&rm->fv_list);
5500 INIT_LIST_HEAD(&rm->rg_list);
5502 /* Get bitmap of field vectors (profiles) that are compatible with the
5503 * rule request; only these will be searched in the subsequent call to
 * ice_get_fv().
5506 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5508 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5512 /* Group match words into recipes using preferred recipe grouping
 * criteria.
5515 status = ice_create_recipe_group(hw, rm, lkup_exts);
5519 /* There is only profile for UDP tunnels. So, it is necessary to use a
5520 * metadata ID flag to differentiate different tunnel types. A separate
5521 * recipe needs to be used for the metadata.
5523 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5524 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5525 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5528 /* set the recipe priority if specified */
5529 rm->priority = rinfo->priority ? rinfo->priority : 0;
5531 /* Find offsets from the field vector. Pick the first one for all the
 * recipes.
5534 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5538 /* get bitmap of all profiles the recipe will be associated with */
5539 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5540 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5542 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5543 ice_set_bit((u16)fvit->profile_id, profiles);
5546 /* Create any special protocol/offset pairs, such as looking at tunnel
5547 * bits by extracting metadata
5549 status = ice_add_special_words(rinfo, lkup_exts);
5551 goto err_free_lkup_exts;
5553 /* Look for a recipe which matches our requested fv / mask list */
5554 *rid = ice_find_recp(hw, lkup_exts);
5555 if (*rid < ICE_MAX_NUM_RECIPES)
5556 /* Success if found a recipe that match the existing criteria */
5559 /* Recipe we need does not exist, add a recipe */
5560 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5564 /* Associate all the recipes created with all the profiles in the
5565 * common field vector.
5567 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5569 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
 /* Read-modify-write the profile's recipe association bitmap. */
5572 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5573 (u8 *)r_bitmap, NULL);
5577 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5578 ICE_MAX_NUM_RECIPES);
5579 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5583 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5586 ice_release_change_lock(hw);
5591 /* Update profile to recipe bitmap array */
5592 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5593 ICE_MAX_NUM_RECIPES);
5595 /* Update recipe to profile bitmap array */
5596 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5597 if (ice_is_bit_set(r_bitmap, j))
5598 ice_set_bit((u16)fvit->profile_id,
5599 recipe_to_profile[j]);
5602 *rid = rm->root_rid;
5603 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5604 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
 /* Cleanup: free the recipe group entries and field vector list
  * gathered above.
  */
5606 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5607 ice_recp_grp_entry, l_entry) {
5608 LIST_DEL(&r_entry->l_entry);
5609 ice_free(hw, r_entry);
5612 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5614 LIST_DEL(&fvit->list_entry);
5619 ice_free(hw, rm->root_buf);
5624 ice_free(hw, lkup_exts);
5630 * ice_find_dummy_packet - find dummy packet by tunnel type
 *
5632 * @lkups: lookup elements or match criteria for the advanced recipe, one
5633 * structure per protocol header
5634 * @lkups_cnt: number of protocols
5635 * @tun_type: tunnel type from the match criteria
5636 * @pkt: dummy packet to fill according to filter match criteria
5637 * @pkt_len: packet length of dummy packet
5638 * @offsets: pointer to receive the pointer to the offsets for the packet
 *
 * Selects the dummy packet template that matches the tunnel type and, for
 * generic cases, whether the lookups reference TCP/UDP/IPv6 headers. The
 * returned pointers reference static templates; the caller must not free
 * or modify them.
5641 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5642 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5644 const struct ice_dummy_pkt_offsets **offsets)
5646 bool tcp = false, udp = false, ipv6 = false;
 /* GTP and PPPoE have dedicated templates regardless of lkups. */
5649 if (tun_type == ICE_SW_TUN_GTP) {
5650 *pkt = dummy_udp_gtp_packet;
5651 *pkt_len = sizeof(dummy_udp_gtp_packet);
5652 *offsets = dummy_udp_gtp_packet_offsets;
5655 if (tun_type == ICE_SW_TUN_PPPOE) {
5656 *pkt = dummy_pppoe_packet;
5657 *pkt_len = sizeof(dummy_pppoe_packet);
5658 *offsets = dummy_pppoe_packet_offsets;
 /* Inspect lookups to learn which L3/L4 headers the rule matches. */
5661 for (i = 0; i < lkups_cnt; i++) {
5662 if (lkups[i].type == ICE_UDP_ILOS)
5664 else if (lkups[i].type == ICE_TCP_IL)
5666 else if (lkups[i].type == ICE_IPV6_OFOS)
5670 if (tun_type == ICE_ALL_TUNNELS) {
5671 *pkt = dummy_gre_udp_packet;
5672 *pkt_len = sizeof(dummy_gre_udp_packet);
5673 *offsets = dummy_gre_udp_packet_offsets;
5677 if (tun_type == ICE_SW_TUN_NVGRE) {
 /* inner TCP vs. inner UDP template */
5679 *pkt = dummy_gre_tcp_packet;
5680 *pkt_len = sizeof(dummy_gre_tcp_packet);
5681 *offsets = dummy_gre_tcp_packet_offsets;
5685 *pkt = dummy_gre_udp_packet;
5686 *pkt_len = sizeof(dummy_gre_udp_packet);
5687 *offsets = dummy_gre_udp_packet_offsets;
5691 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5692 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5694 *pkt = dummy_udp_tun_tcp_packet;
5695 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5696 *offsets = dummy_udp_tun_tcp_packet_offsets;
5700 *pkt = dummy_udp_tun_udp_packet;
5701 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5702 *offsets = dummy_udp_tun_udp_packet_offsets;
 /* Non-tunneled cases, keyed off the L3/L4 flags gathered above. */
5707 *pkt = dummy_udp_packet;
5708 *pkt_len = sizeof(dummy_udp_packet);
5709 *offsets = dummy_udp_packet_offsets;
5711 } else if (udp && ipv6) {
5712 *pkt = dummy_udp_ipv6_packet;
5713 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5714 *offsets = dummy_udp_ipv6_packet_offsets;
5716 } else if ((tcp && ipv6) || ipv6) {
5717 *pkt = dummy_tcp_ipv6_packet;
5718 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5719 *offsets = dummy_tcp_ipv6_packet_offsets;
 /* Fallback: plain IPv4 TCP template. */
5723 *pkt = dummy_tcp_packet;
5724 *pkt_len = sizeof(dummy_tcp_packet);
5725 *offsets = dummy_tcp_packet_offsets;
5729 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5731 * @lkups: lookup elements or match criteria for the advanced recipe, one
5732 * structure per protocol header
5733 * @lkups_cnt: number of protocols
5734 * @s_rule: stores rule information from the match criteria
5735 * @dummy_pkt: dummy packet to fill according to filter match criteria
5736 * @pkt_len: packet length of dummy packet
5737 * @offsets: offset info for the dummy packet
5739 static enum ice_status
5740 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5741 struct ice_aqc_sw_rules_elem *s_rule,
5742 const u8 *dummy_pkt, u16 pkt_len,
5743 const struct ice_dummy_pkt_offsets *offsets)
5748 /* Start with a packet with a pre-defined/dummy content. Then, fill
5749 * in the header values to be looked up or matched.
5751 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5753 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5755 for (i = 0; i < lkups_cnt; i++) {
5756 enum ice_protocol_type type;
5757 u16 offset = 0, len = 0, j;
5760 /* find the start of this layer; it should be found since this
5761 * was already checked when search for the dummy packet
5763 type = lkups[i].type;
5764 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5765 if (type == offsets[j].type) {
5766 offset = offsets[j].offset;
5771 /* this should never happen in a correct calling sequence */
5773 return ICE_ERR_PARAM;
/* Determine header length for this protocol; it drives the
 * word-wise masked copy below.
 */
5775 switch (lkups[i].type) {
5778 len = sizeof(struct ice_ether_hdr);
5781 len = sizeof(struct ice_ethtype_hdr);
5784 len = sizeof(struct ice_vlan_hdr);
5788 len = sizeof(struct ice_ipv4_hdr);
5792 /* Based on the same mechanism below, if tc (Traffic
5793 * Class) for IPv6 has mask, it means tc field is set.
5794 * Since tc is only one byte, we have to handle the
5795 * big/little endian issue before it can be inserted.
/* NOTE(review): this byte-swap writes back into the caller's
 * lkups[] h_u/m_u — the input array is mutated in place.
 */
5797 if (lkups[i].m_u.ipv6_hdr.tc) {
5798 ((u16 *)&lkups[i].h_u)[0] =
5799 (((u16 *)&lkups[i].h_u)[0] << 8) |
5800 (((u16 *)&lkups[i].h_u)[0] >> 8);
5801 ((u16 *)&lkups[i].m_u)[0] =
5802 (((u16 *)&lkups[i].m_u)[0] << 8) |
5803 (((u16 *)&lkups[i].m_u)[0] >> 8);
5805 len = sizeof(struct ice_ipv6_hdr);
5810 len = sizeof(struct ice_l4_hdr);
5813 len = sizeof(struct ice_sctp_hdr);
5816 len = sizeof(struct ice_nvgre);
5821 len = sizeof(struct ice_udp_tnl_hdr);
5825 len = sizeof(struct ice_udp_gtp_hdr);
5828 len = sizeof(struct ice_pppoe_hdr);
5831 return ICE_ERR_PARAM;
5834 /* the length should be a word multiple */
5835 if (len % ICE_BYTES_PER_WORD)
5838 /* We have the offset to the header start, the length, the
5839 * caller's header values and mask. Use this information to
5840 * copy the data into the dummy packet appropriately based on
5841 * the mask. Note that we need to only write the bits as
5842 * indicated by the mask to make sure we don't improperly write
5843 * over any significant packet data.
/* Word-wise read-modify-write: only mask-selected bits of the
 * dummy packet are replaced by the caller's header values.
 */
5845 for (j = 0; j < len / sizeof(u16); j++)
5846 if (((u16 *)&lkups[i].m_u)[j])
5847 ((u16 *)(pkt + offset))[j] =
5848 (((u16 *)(pkt + offset))[j] &
5849 ~((u16 *)&lkups[i].m_u)[j]) |
5850 (((u16 *)&lkups[i].h_u)[j] &
5851 ((u16 *)&lkups[i].m_u)[j]);
5854 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5860 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5861 * @hw: pointer to the hardware structure
5862 * @tun_type: tunnel type
5863 * @pkt: dummy packet to fill in
5864 * @offsets: offset info for the dummy packet
5866 static enum ice_status
5867 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5868 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Look up the currently-open tunnel UDP port for the given tunnel
 * type; VXLAN-family types share the TNL_VXLAN port.
 */
5873 case ICE_SW_TUN_AND_NON_TUN:
5874 case ICE_SW_TUN_VXLAN_GPE:
5875 case ICE_SW_TUN_VXLAN:
5876 case ICE_SW_TUN_UDP:
5877 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5881 case ICE_SW_TUN_GENEVE:
5882 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5887 /* Nothing needs to be done for this tunnel type */
5891 /* Find the outer UDP protocol header and insert the port number */
5892 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5893 if (offsets[i].type == ICE_UDP_OF) {
5894 struct ice_l4_hdr *hdr;
5897 offset = offsets[i].offset;
5898 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Manual 16-bit byte swap; presumably converts the port to
 * the packet's big-endian byte order -- TODO confirm against
 * the declaration of open_port.
 */
5899 hdr->dst_port = open_port << 8 | open_port >> 8;
5909 * ice_find_adv_rule_entry - Search a rule entry
5910 * @hw: pointer to the hardware structure
5911 * @lkups: lookup elements or match criteria for the advanced recipe, one
5912 * structure per protocol header
5913 * @lkups_cnt: number of protocols
5914 * @recp_id: recipe ID for which we are finding the rule
5915 * @rinfo: other information regarding the rule e.g. priority and action info
5917 * Helper function to search for a given advance rule entry
5918 * Returns pointer to entry storing the rule if found
5920 static struct ice_adv_fltr_mgmt_list_entry *
5921 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5922 u16 lkups_cnt, u8 recp_id,
5923 struct ice_adv_rule_info *rinfo)
5925 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5926 struct ice_switch_info *sw = hw->switch_info;
/* Walk every stored rule of the recipe; a match requires the same
 * lookup count, byte-identical lookup elements, and matching
 * sw_act flag and tunnel type.
 */
5929 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5930 ice_adv_fltr_mgmt_list_entry, list_entry) {
5931 bool lkups_matched = true;
5933 if (lkups_cnt != list_itr->lkups_cnt)
5935 for (i = 0; i < list_itr->lkups_cnt; i++)
5936 if (memcmp(&list_itr->lkups[i], &lkups[i],
5938 lkups_matched = false;
5941 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5942 rinfo->tun_type == list_itr->rule_info.tun_type &&
5950 * ice_adv_add_update_vsi_list
5951 * @hw: pointer to the hardware structure
5952 * @m_entry: pointer to current adv filter management list entry
5953 * @cur_fltr: filter information from the book keeping entry
5954 * @new_fltr: filter information with the new VSI to be added
5956 * Call AQ command to add or update previously created VSI list with new VSI.
5958 * Helper function to do book keeping associated with adding filter information
5959 * The algorithm to do the booking keeping is described below :
5960 * When a VSI needs to subscribe to a given advanced filter
5961 * if only one VSI has been added till now
5962 * Allocate a new VSI list and add two VSIs
5963 * to this list using switch rule command
5964 * Update the previously created switch rule with the
5965 * newly created VSI list ID
5966 * if a VSI list was previously created
5967 * Add the new VSI to the previously created VSI list set
5968 * using the update switch rule command
5970 static enum ice_status
5971 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5972 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5973 struct ice_adv_rule_info *cur_fltr,
5974 struct ice_adv_rule_info *new_fltr)
5976 enum ice_status status;
5977 u16 vsi_list_id = 0;
/* Queue/queue-group destinations cannot be aggregated into a VSI
 * list; reject those combinations up front.
 */
5979 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5980 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5981 return ICE_ERR_NOT_IMPL;
5983 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5984 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5985 return ICE_ERR_ALREADY_EXISTS;
5987 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5988 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5989 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5990 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5991 return ICE_ERR_NOT_IMPL;
5993 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5994 /* Only one entry existed in the mapping and it was not already
5995 * a part of a VSI list. So, create a VSI list with the old and
5998 struct ice_fltr_info tmp_fltr;
5999 u16 vsi_handle_arr[2];
6001 /* A rule already exists with the new VSI being added */
6002 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6003 new_fltr->sw_act.fwd_id.hw_vsi_id)
6004 return ICE_ERR_ALREADY_EXISTS;
6006 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6007 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6008 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6014 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6015 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6016 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6017 /* Update the previous switch rule of "forward to VSI" to
6020 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the rule now forwards to the new VSI list */
6024 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6025 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6026 m_entry->vsi_list_info =
6027 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6030 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6032 if (!m_entry->vsi_list_info)
6035 /* A rule already exists with the new VSI being added */
6036 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6039 /* Update the previously created VSI list set with
6040 * the new VSI ID passed in
6042 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6044 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6046 ice_aqc_opc_update_sw_rules,
6048 /* update VSI list mapping info with new VSI ID */
6050 ice_set_bit(vsi_handle,
6051 m_entry->vsi_list_info->vsi_map)
6054 m_entry->vsi_count++;
6059 * ice_add_adv_rule - helper function to create an advanced switch rule
6060 * @hw: pointer to the hardware structure
6061 * @lkups: information on the words that needs to be looked up. All words
6062 * together makes one recipe
6063 * @lkups_cnt: num of entries in the lkups array
6064 * @rinfo: other information related to the rule that needs to be programmed
6065 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6066 * ignored is case of error.
6068 * This function can program only 1 rule at a time. The lkups is used to
6069 * describe the all the words that forms the "lookup" portion of the recipe.
6070 * These words can span multiple protocols. Callers to this function need to
6071 * pass in a list of protocol headers with lookup information along and mask
6072 * that determines which words are valid from the given protocol header.
6073 * rinfo describes other information related to this rule such as forwarding
6074 * IDs, priority of this rule, etc.
6077 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6078 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6079 struct ice_rule_query_data *added_entry)
6081 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6082 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6083 const struct ice_dummy_pkt_offsets *pkt_offsets;
6084 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6085 struct LIST_HEAD_TYPE *rule_head;
6086 struct ice_switch_info *sw;
6087 enum ice_status status;
6088 const u8 *pkt = NULL;
6093 /* Initialize profile to result index bitmap */
6094 if (!hw->switch_info->prof_res_bm_init) {
6095 hw->switch_info->prof_res_bm_init = 1;
6096 ice_init_prof_result_bm(hw);
6100 return ICE_ERR_PARAM;
6102 /* get # of words we need to match */
/* Count the non-zero 16-bit mask words across all lookups; the
 * total must fit within what a recipe chain can match.
 */
6104 for (i = 0; i < lkups_cnt; i++) {
6107 ptr = (u16 *)&lkups[i].m_u;
6108 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6112 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6113 return ICE_ERR_PARAM;
6115 /* make sure that we can locate a dummy packet */
6116 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6119 status = ICE_ERR_PARAM;
6120 goto err_ice_add_adv_rule;
/* Only VSI/queue/queue-group forwarding and drop are supported */
6123 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6124 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6125 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6126 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6129 vsi_handle = rinfo->sw_act.vsi_handle;
6130 if (!ice_is_vsi_valid(hw, vsi_handle))
6131 return ICE_ERR_PARAM;
6133 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6134 rinfo->sw_act.fwd_id.hw_vsi_id =
6135 ice_get_hw_vsi_num(hw, vsi_handle);
6136 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6137 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6139 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If an identical rule already exists, just subscribe this VSI to
 * it via the VSI-list machinery instead of programming a new rule.
 */
6142 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6144 /* we have to add VSI to VSI_LIST and increment vsi_count.
6145 * Also Update VSI list so that we can change forwarding rule
6146 * if the rule already exists, we will check if it exists with
6147 * same vsi_id, if not then add it to the VSI list if it already
6148 * exists if not then create a VSI list and add the existing VSI
6149 * ID and the new VSI ID to the list
6150 * We will add that VSI to the list
6152 status = ice_adv_add_update_vsi_list(hw, m_entry,
6153 &m_entry->rule_info,
6156 added_entry->rid = rid;
6157 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6158 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Build the AQ switch-rule buffer: header-less rule size plus the
 * dummy packet that carries the match criteria.
 */
6162 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6163 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6165 return ICE_ERR_NO_MEMORY;
6166 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6167 switch (rinfo->sw_act.fltr_act) {
6168 case ICE_FWD_TO_VSI:
6169 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6170 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6171 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6174 act |= ICE_SINGLE_ACT_TO_Q;
6175 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6176 ICE_SINGLE_ACT_Q_INDEX_M;
6178 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 in the Q_REGION field */
6179 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6180 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6181 act |= ICE_SINGLE_ACT_TO_Q;
6182 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6183 ICE_SINGLE_ACT_Q_INDEX_M;
6184 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6185 ICE_SINGLE_ACT_Q_REGION_M;
6187 case ICE_DROP_PACKET:
6188 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6189 ICE_SINGLE_ACT_VALID_BIT;
6192 status = ICE_ERR_CFG;
6193 goto err_ice_add_adv_rule;
6196 /* set the rule LOOKUP type based on caller specified 'RX'
6197 * instead of hardcoding it to be either LOOKUP_TX/RX
6199 * for 'RX' set the source to be the port number
6200 * for 'TX' set the source to be the source HW VSI number (determined
6204 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6205 s_rule->pdata.lkup_tx_rx.src =
6206 CPU_TO_LE16(hw->port_info->lport);
6208 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6209 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6212 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6213 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6215 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6216 pkt_len, pkt_offsets);
6218 goto err_ice_add_adv_rule;
/* Tunnel rules additionally need the open tunnel UDP port patched
 * into the dummy packet's outer UDP header.
 */
6220 if (rinfo->tun_type != ICE_NON_TUN) {
6221 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6222 s_rule->pdata.lkup_tx_rx.hdr,
6225 goto err_ice_add_adv_rule;
6228 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6229 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6232 goto err_ice_add_adv_rule;
/* Rule is programmed in HW; now create the book-keeping entry
 * (with a private copy of the lookups) and link it to the recipe.
 */
6233 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6234 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6236 status = ICE_ERR_NO_MEMORY;
6237 goto err_ice_add_adv_rule;
6240 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6241 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6242 ICE_NONDMA_TO_NONDMA);
6243 if (!adv_fltr->lkups) {
6244 status = ICE_ERR_NO_MEMORY;
6245 goto err_ice_add_adv_rule;
6248 adv_fltr->lkups_cnt = lkups_cnt;
6249 adv_fltr->rule_info = *rinfo;
6250 adv_fltr->rule_info.fltr_rule_id =
6251 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6252 sw = hw->switch_info;
6253 sw->recp_list[rid].adv_rule = true;
6254 rule_head = &sw->recp_list[rid].filt_rules;
6256 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6257 struct ice_fltr_info tmp_fltr;
6259 tmp_fltr.fltr_rule_id =
6260 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6261 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6262 tmp_fltr.fwd_id.hw_vsi_id =
6263 ice_get_hw_vsi_num(hw, vsi_handle);
6264 tmp_fltr.vsi_handle = vsi_handle;
6265 /* Update the previous switch rule of "forward to VSI" to
6268 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6270 goto err_ice_add_adv_rule;
6271 adv_fltr->vsi_count = 1;
6274 /* Add rule entry to book keeping list */
6275 LIST_ADD(&adv_fltr->list_entry, rule_head);
6277 added_entry->rid = rid;
6278 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6279 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Unified error path: on failure release the partially-built
 * book-keeping entry; the AQ rule buffer is always freed.
 */
6281 err_ice_add_adv_rule:
6282 if (status && adv_fltr) {
6283 ice_free(hw, adv_fltr->lkups);
6284 ice_free(hw, adv_fltr);
6287 ice_free(hw, s_rule);
6293 * ice_adv_rem_update_vsi_list
6294 * @hw: pointer to the hardware structure
6295 * @vsi_handle: VSI handle of the VSI to remove
6296 * @fm_list: filter management entry for which the VSI list management needs to
6299 static enum ice_status
6300 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6301 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6303 struct ice_vsi_list_map_info *vsi_list_info;
6304 enum ice_sw_lkup_type lkup_type;
6305 enum ice_status status;
/* Only rules that currently forward to a VSI list can have a VSI
 * removed from them.
 */
6308 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6309 fm_list->vsi_count == 0)
6310 return ICE_ERR_PARAM;
6312 /* A rule with the VSI being removed does not exist */
6313 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6314 return ICE_ERR_DOES_NOT_EXIST;
6316 lkup_type = ICE_SW_LKUP_LAST;
6317 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6318 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6319 ice_aqc_opc_update_sw_rules,
6324 fm_list->vsi_count--;
6325 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6326 vsi_list_info = fm_list->vsi_list_info;
/* When one subscriber remains, collapse the VSI list back into a
 * direct "forward to VSI" rule and free the list.
 */
6327 if (fm_list->vsi_count == 1) {
6328 struct ice_fltr_info tmp_fltr;
6331 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6333 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6334 return ICE_ERR_OUT_OF_RANGE;
6336 /* Make sure VSI list is empty before removing it below */
6337 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6339 ice_aqc_opc_update_sw_rules,
6343 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6344 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6345 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6346 tmp_fltr.fwd_id.hw_vsi_id =
6347 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6348 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6349 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6351 /* Update the previous switch rule of "MAC forward to VSI" to
6352 * "MAC fwd to VSI list"
6354 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6356 ice_debug(hw, ICE_DBG_SW,
6357 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6358 tmp_fltr.fwd_id.hw_vsi_id, status);
6362 /* Remove the VSI list since it is no longer used */
6363 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6365 ice_debug(hw, ICE_DBG_SW,
6366 "Failed to remove VSI list %d, error %d\n",
6367 vsi_list_id, status);
/* Drop the software map entry now that the HW list is gone */
6371 LIST_DEL(&vsi_list_info->list_entry);
6372 ice_free(hw, vsi_list_info);
6373 fm_list->vsi_list_info = NULL;
6380 * ice_rem_adv_rule - removes existing advanced switch rule
6381 * @hw: pointer to the hardware structure
6382 * @lkups: information on the words that needs to be looked up. All words
6383 * together makes one recipe
6384 * @lkups_cnt: num of entries in the lkups array
6385 * @rinfo: Its the pointer to the rule information for the rule
6387 * This function can be used to remove 1 rule at a time. The lkups is
6388 * used to describe all the words that forms the "lookup" portion of the
6389 * rule. These words can span multiple protocols. Callers to this function
6390 * need to pass in a list of protocol headers with lookup information along
6391 * and mask that determines which words are valid from the given protocol
6392 * header. rinfo describes other information related to this rule such as
6393 * forwarding IDs, priority of this rule, etc.
6396 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6397 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6399 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6400 struct ice_prot_lkup_ext lkup_exts;
6401 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6402 enum ice_status status = ICE_SUCCESS;
6403 bool remove_rule = false;
6404 u16 i, rid, vsi_handle;
/* Rebuild the protocol/offset extraction words from the caller's
 * lookups so the owning recipe can be located.
 */
6406 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6407 for (i = 0; i < lkups_cnt; i++) {
6410 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6413 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6418 /* Create any special protocol/offset pairs, such as looking at tunnel
6419 * bits by extracting metadata
6421 status = ice_add_special_words(rinfo, &lkup_exts);
6425 rid = ice_find_recp(hw, &lkup_exts);
6426 /* If did not find a recipe that match the existing criteria */
6427 if (rid == ICE_MAX_NUM_RECIPES)
6428 return ICE_ERR_PARAM;
6430 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6431 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6432 /* the rule is already removed */
/* Decide, under the rule lock, whether the HW rule itself must be
 * removed or only this VSI unsubscribed from its VSI list.
 */
6435 ice_acquire_lock(rule_lock);
6436 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6438 } else if (list_elem->vsi_count > 1) {
6439 list_elem->vsi_list_info->ref_cnt--;
6440 remove_rule = false;
6441 vsi_handle = rinfo->sw_act.vsi_handle;
6442 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6444 vsi_handle = rinfo->sw_act.vsi_handle;
6445 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6447 ice_release_lock(rule_lock);
6450 if (list_elem->vsi_count == 0)
6453 ice_release_lock(rule_lock);
/* Remove the rule from HW via a zero-action remove AQ command and
 * then drop the book-keeping entry.
 */
6455 struct ice_aqc_sw_rules_elem *s_rule;
6458 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6460 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6463 return ICE_ERR_NO_MEMORY;
6464 s_rule->pdata.lkup_tx_rx.act = 0;
6465 s_rule->pdata.lkup_tx_rx.index =
6466 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6467 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6468 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6470 ice_aqc_opc_remove_sw_rules, NULL);
6471 if (status == ICE_SUCCESS) {
6472 ice_acquire_lock(rule_lock);
6473 LIST_DEL(&list_elem->list_entry);
6474 ice_free(hw, list_elem->lkups);
6475 ice_free(hw, list_elem);
6476 ice_release_lock(rule_lock);
6478 ice_free(hw, s_rule);
6484 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6485 * @hw: pointer to the hardware structure
6486 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6488 * This function is used to remove 1 rule at a time. The removal is based on
6489 * the remove_entry parameter. This function will remove rule for a given
6490 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6493 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6494 struct ice_rule_query_data *remove_entry)
6496 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6497 struct LIST_HEAD_TYPE *list_head;
6498 struct ice_adv_rule_info rinfo;
6499 struct ice_switch_info *sw;
6501 sw = hw->switch_info;
/* The recipe must have been created before any of its rules can
 * be looked up for removal.
 */
6502 if (!sw->recp_list[remove_entry->rid].recp_created)
6503 return ICE_ERR_PARAM;
6504 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6505 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6507 if (list_itr->rule_info.fltr_rule_id ==
6508 remove_entry->rule_id) {
/* Copy the stored rule info but target the caller's VSI,
 * then delegate the actual removal to ice_rem_adv_rule().
 */
6509 rinfo = list_itr->rule_info;
6510 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6511 return ice_rem_adv_rule(hw, list_itr->lkups,
6512 list_itr->lkups_cnt, &rinfo);
6515 return ICE_ERR_PARAM;
6519 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6521 * @hw: pointer to the hardware structure
6522 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6524 * This function is used to remove all the rules for a given VSI and as soon
6525 * as removing a rule fails, it will return immediately with the error code,
6526 * else it will return ICE_SUCCESS
6529 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6531 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6532 struct ice_vsi_list_map_info *map_info;
6533 struct LIST_HEAD_TYPE *list_head;
6534 struct ice_adv_rule_info rinfo;
6535 struct ice_switch_info *sw;
6536 enum ice_status status;
6537 u16 vsi_list_id = 0;
/* Walk every created recipe that holds advanced rules and remove
 * the rules this VSI subscribes to.
 */
6540 sw = hw->switch_info;
6541 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6542 if (!sw->recp_list[rid].recp_created)
6544 if (!sw->recp_list[rid].adv_rule)
6546 list_head = &sw->recp_list[rid].filt_rules;
6548 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6549 ice_adv_fltr_mgmt_list_entry, list_entry) {
6550 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6554 rinfo = list_itr->rule_info;
6555 rinfo.sw_act.vsi_handle = vsi_handle;
6556 status = ice_rem_adv_rule(hw, list_itr->lkups,
6557 list_itr->lkups_cnt, &rinfo);
6567 * ice_replay_fltr - Replay all the filters stored by a specific list head
6568 * @hw: pointer to the hardware structure
6569 * @list_head: list for which filters needs to be replayed
6570 * @recp_id: Recipe ID for which rules need to be replayed
6572 static enum ice_status
6573 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6575 struct ice_fltr_mgmt_list_entry *itr;
6576 struct LIST_HEAD_TYPE l_head;
6577 enum ice_status status = ICE_SUCCESS;
6579 if (LIST_EMPTY(list_head))
6582 /* Move entries from the given list_head to a temporary l_head so that
6583 * they can be replayed. Otherwise when trying to re-add the same
6584 * filter, the function will return already exists
6586 LIST_REPLACE_INIT(list_head, &l_head)
6588 /* Mark the given list_head empty by reinitializing it so filters
6589 * could be added again by *handler
6591 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6593 struct ice_fltr_list_entry f_entry;
6595 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly; otherwise a
 * filter is replayed once per VSI in its VSI map below.
 */
6596 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6597 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6598 if (status != ICE_SUCCESS)
6603 /* Add a filter per VSI separately */
6608 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6610 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit so the next iteration finds the next VSI */
6613 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6614 f_entry.fltr_info.vsi_handle = vsi_handle;
6615 f_entry.fltr_info.fwd_id.hw_vsi_id =
6616 ice_get_hw_vsi_num(hw, vsi_handle);
6617 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6618 if (recp_id == ICE_SW_LKUP_VLAN)
6619 status = ice_add_vlan_internal(hw, &f_entry);
6621 status = ice_add_rule_internal(hw, recp_id,
6623 if (status != ICE_SUCCESS)
6628 /* Clear the filter management list */
6629 ice_rem_sw_rule_info(hw, &l_head);
6634 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6635 * @hw: pointer to the hardware structure
6637 * NOTE: This function does not clean up partially added filters on error.
6638 * It is up to caller of the function to issue a reset or fail early.
6640 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6642 struct ice_switch_info *sw = hw->switch_info;
6643 enum ice_status status = ICE_SUCCESS;
/* Replay each recipe's filter list; stop at the first failure */
6646 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6647 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6649 status = ice_replay_fltr(hw, i, head);
6650 if (status != ICE_SUCCESS)
6657 * ice_replay_vsi_fltr - Replay filters for requested VSI
6658 * @hw: pointer to the hardware structure
6659 * @vsi_handle: driver VSI handle
6660 * @recp_id: Recipe ID for which rules need to be replayed
6661 * @list_head: list for which filters need to be replayed
6663 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6664 * It is required to pass valid VSI handle.
6666 static enum ice_status
6667 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6668 struct LIST_HEAD_TYPE *list_head)
6670 struct ice_fltr_mgmt_list_entry *itr;
6671 enum ice_status status = ICE_SUCCESS;
6674 if (LIST_EMPTY(list_head))
6676 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6678 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6680 struct ice_fltr_list_entry f_entry;
6682 f_entry.fltr_info = itr->fltr_info;
/* Filters owned directly by this VSI (single subscriber,
 * non-VLAN) are replayed as-is.
 */
6683 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6684 itr->fltr_info.vsi_handle == vsi_handle) {
6685 /* update the src in case it is VSI num */
6686 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6687 f_entry.fltr_info.src = hw_vsi_id;
6688 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6689 if (status != ICE_SUCCESS)
/* Otherwise only replay if this VSI is in the filter's VSI list */
6693 if (!itr->vsi_list_info ||
6694 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6696 /* Clearing it so that the logic can add it back */
6697 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6698 f_entry.fltr_info.vsi_handle = vsi_handle;
6699 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6700 /* update the src in case it is VSI num */
6701 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6702 f_entry.fltr_info.src = hw_vsi_id;
6703 if (recp_id == ICE_SW_LKUP_VLAN)
6704 status = ice_add_vlan_internal(hw, &f_entry);
6706 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6707 if (status != ICE_SUCCESS)
6715 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6716 * @hw: pointer to the hardware structure
6717 * @vsi_handle: driver VSI handle
6718 * @list_head: list for which filters need to be replayed
6720 * Replay the advanced rule for the given VSI.
6722 static enum ice_status
6723 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6724 struct LIST_HEAD_TYPE *list_head)
6726 struct ice_rule_query_data added_entry = { 0 };
6727 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6728 enum ice_status status = ICE_SUCCESS;
6730 if (LIST_EMPTY(list_head))
/* Re-add each stored advanced rule that targets this VSI; rules for
 * other VSIs are skipped.
 */
6732 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6734 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6735 u16 lk_cnt = adv_fltr->lkups_cnt;
6737 if (vsi_handle != rinfo->sw_act.vsi_handle)
6739 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6748 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6749 * @hw: pointer to the hardware structure
6750 * @vsi_handle: driver VSI handle
6752 * Replays filters for requested VSI via vsi_handle.
6754 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6756 struct ice_switch_info *sw = hw->switch_info;
6757 enum ice_status status;
6760 /* Update the recipes that were created */
6761 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6762 struct LIST_HEAD_TYPE *head;
6764 head = &sw->recp_list[i].filt_replay_rules;
/* Advanced-rule recipes use the adv replay path; all others use
 * the regular per-recipe filter replay.
 */
6765 if (!sw->recp_list[i].adv_rule)
6766 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6768 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6769 if (status != ICE_SUCCESS)
6777 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6778 * @hw: pointer to the HW struct
6780 * Deletes the filter replay rules.
6782 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6784 struct ice_switch_info *sw = hw->switch_info;
6790 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6791 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6792 struct LIST_HEAD_TYPE *l_head;
6794 l_head = &sw->recp_list[i].filt_replay_rules;
6795 if (!sw->recp_list[i].adv_rule)
6796 ice_rem_sw_rule_info(hw, l_head);
6798 ice_rem_adv_rule_info(hw, l_head);