/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019 Intel Corporation
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
214 { ICE_UDP_ILOS, 84 },
215 { ICE_PROTOCOL_LAST, 0 },
219 u8 dummy_udp_tun_udp_packet[] = {
220 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x08, 0x00, /* ICE_ETYPE_OL 12 */
226 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
227 0x00, 0x01, 0x00, 0x00,
228 0x00, 0x11, 0x00, 0x00,
229 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
233 0x00, 0x3a, 0x00, 0x00,
235 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
236 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
243 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
244 0x00, 0x01, 0x00, 0x00,
245 0x00, 0x11, 0x00, 0x00,
246 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
250 0x00, 0x08, 0x00, 0x00,
254 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
256 { ICE_ETYPE_OL, 12 },
257 { ICE_IPV4_OFOS, 14 },
258 { ICE_UDP_ILOS, 34 },
259 { ICE_PROTOCOL_LAST, 0 },
263 dummy_udp_packet[] = {
264 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
265 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00,
268 0x08, 0x00, /* ICE_ETYPE_OL 12 */
270 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
271 0x00, 0x01, 0x00, 0x00,
272 0x00, 0x11, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
276 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
277 0x00, 0x08, 0x00, 0x00,
279 0x00, 0x00, /* 2 bytes for 4 byte alignment */
283 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
285 { ICE_ETYPE_OL, 12 },
286 { ICE_IPV4_OFOS, 14 },
288 { ICE_PROTOCOL_LAST, 0 },
292 dummy_tcp_packet[] = {
293 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
294 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00,
297 0x08, 0x00, /* ICE_ETYPE_OL 12 */
299 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
300 0x00, 0x01, 0x00, 0x00,
301 0x00, 0x06, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
306 0x00, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
308 0x50, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, /* 2 bytes for 4 byte alignment */
315 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
317 { ICE_ETYPE_OL, 12 },
318 { ICE_IPV6_OFOS, 14 },
320 { ICE_PROTOCOL_LAST, 0 },
324 dummy_tcp_ipv6_packet[] = {
325 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
326 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
329 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
331 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
332 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x50, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, /* 2 bytes for 4 byte alignment */
352 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
354 { ICE_ETYPE_OL, 12 },
355 { ICE_IPV6_OFOS, 14 },
356 { ICE_UDP_ILOS, 54 },
357 { ICE_PROTOCOL_LAST, 0 },
361 dummy_udp_ipv6_packet[] = {
362 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
363 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00,
366 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
368 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
369 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
380 0x00, 0x08, 0x00, 0x00,
382 0x00, 0x00, /* 2 bytes for 4 byte alignment */
386 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
388 { ICE_IPV4_OFOS, 14 },
391 { ICE_PROTOCOL_LAST, 0 },
395 dummy_udp_gtp_packet[] = {
396 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
401 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
402 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x11, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
408 0x00, 0x1c, 0x00, 0x00,
410 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
411 0x00, 0x00, 0x00, 0x00,
412 0x00, 0x00, 0x00, 0x85,
414 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
415 0x00, 0x00, 0x00, 0x00,
419 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_PROTOCOL_LAST, 0 },
426 dummy_pppoe_packet[] = {
427 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
428 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, 0x00, 0x00,
432 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 14 */
433 0x00, 0x4e, 0x00, 0x21,
435 0x45, 0x00, 0x00, 0x30, /* PDU */
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x11, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
442 /* this is a recipe to profile association bitmap */
443 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
444 ICE_MAX_NUM_PROFILES);
446 /* this is a profile to recipe association bitmap */
447 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
448 ICE_MAX_NUM_RECIPES);
450 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
453 * ice_collect_result_idx - copy result index values
454 * @buf: buffer that contains the result index
455 * @recp: the recipe struct to copy data into
457 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
458 struct ice_sw_recipe *recp)
460 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
461 ice_set_bit(buf->content.result_indx &
462 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
466 * ice_init_possible_res_bm - initialize possible result bitmap
467 * @pos_result_bm: pointer to the bitmap to initialize
469 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
473 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
475 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
476 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
477 ice_set_bit(bit, pos_result_bm);
481 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
482 * @hw: pointer to hardware structure
483 * @recps: struct that we need to populate
484 * @rid: recipe ID that we are populating
485 * @refresh_required: true if we should get recipe to profile mapping from FW
487 * This function is used to populate all the necessary entries into our
488 * bookkeeping so that we have a current list of all the recipes that are
489 * programmed in the firmware.
491 static enum ice_status
492 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
493 bool *refresh_required)
495 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
496 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
497 struct ice_aqc_recipe_data_elem *tmp;
498 u16 num_recps = ICE_MAX_NUM_RECIPES;
499 struct ice_prot_lkup_ext *lkup_exts;
500 u16 i, sub_recps, fv_word_idx = 0;
501 enum ice_status status;
503 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
504 ice_init_possible_res_bm(possible_idx);
506 /* we need a buffer big enough to accommodate all the recipes */
507 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
508 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
510 return ICE_ERR_NO_MEMORY;
512 tmp[0].recipe_indx = rid;
513 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
514 /* non-zero status meaning recipe doesn't exist */
518 /* Get recipe to profile map so that we can get the fv from lkups that
519 * we read for a recipe from FW. Since we want to minimize the number of
520 * times we make this FW call, just make one call and cache the copy
521 * until a new recipe is added. This operation is only required the
522 * first time to get the changes from FW. Then to search existing
523 * entries we don't need to update the cache again until another recipe
526 if (*refresh_required) {
527 ice_get_recp_to_prof_map(hw);
528 *refresh_required = false;
531 /* Start populating all the entries for recps[rid] based on lkups from
532 * firmware. Note that we are only creating the root recipe in our
535 lkup_exts = &recps[rid].lkup_exts;
537 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
538 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
539 struct ice_recp_grp_entry *rg_entry;
540 u8 prof_id, idx, prot = 0;
544 rg_entry = (struct ice_recp_grp_entry *)
545 ice_malloc(hw, sizeof(*rg_entry));
547 status = ICE_ERR_NO_MEMORY;
551 idx = root_bufs.recipe_indx;
552 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
554 /* Mark all result indices in this chain */
555 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
556 ice_set_bit(root_bufs.content.result_indx &
557 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
559 /* get the first profile that is associated with rid */
560 prof_id = ice_find_first_bit(recipe_to_profile[idx],
561 ICE_MAX_NUM_PROFILES);
562 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
563 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
565 rg_entry->fv_idx[i] = lkup_indx;
566 rg_entry->fv_mask[i] =
567 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
569 /* If the recipe is a chained recipe then all its
570 * child recipe's result will have a result index.
571 * To fill fv_words we should not use those result
572 * index, we only need the protocol ids and offsets.
573 * We will skip all the fv_idx which stores result
574 * index in them. We also need to skip any fv_idx which
575 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
576 * valid offset value.
578 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
579 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
580 rg_entry->fv_idx[i] == 0)
583 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
584 rg_entry->fv_idx[i], &prot, &off);
585 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
586 lkup_exts->fv_words[fv_word_idx].off = off;
589 /* populate rg_list with the data from the child entry of this
592 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
594 /* Propagate some data to the recipe database */
595 recps[idx].is_root = is_root;
596 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
597 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
598 recps[idx].chain_idx = root_bufs.content.result_indx &
599 ~ICE_AQ_RECIPE_RESULT_EN;
601 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
606 /* Only do the following for root recipes entries */
607 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
608 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
609 recps[idx].root_rid = root_bufs.content.rid &
610 ~ICE_AQ_RECIPE_ID_IS_ROOT;
611 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
612 recps[idx].big_recp = (recps[rid].n_grp_count > 1);
615 /* Complete initialization of the root recipe entry */
616 lkup_exts->n_val_words = fv_word_idx;
617 recps[rid].n_grp_count = num_recps;
618 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
619 ice_calloc(hw, recps[rid].n_grp_count,
620 sizeof(struct ice_aqc_recipe_data_elem));
621 if (!recps[rid].root_buf)
624 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
625 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
627 /* Copy result indexes */
628 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
629 ICE_NONDMA_TO_NONDMA);
630 recps[rid].recp_created = true;
638 * ice_get_recp_to_prof_map - updates recipe to profile mapping
639 * @hw: pointer to hardware structure
641 * This function is used to populate recipe_to_profile matrix where index to
642 * this array is the recipe ID and the element is the mapping of which profiles
643 * is this recipe mapped to.
646 ice_get_recp_to_prof_map(struct ice_hw *hw)
648 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
651 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
654 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
655 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
656 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
658 ice_memcpy(profile_to_recipe[i], r_bitmap,
659 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
660 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
661 if (ice_is_bit_set(r_bitmap, j))
662 ice_set_bit(i, recipe_to_profile[j]);
667 * ice_init_def_sw_recp - initialize the recipe book keeping tables
668 * @hw: pointer to the HW struct
670 * Allocate memory for the entire recipe table and initialize the structures/
671 * entries corresponding to basic recipes.
673 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
675 struct ice_sw_recipe *recps;
678 recps = (struct ice_sw_recipe *)
679 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
681 return ICE_ERR_NO_MEMORY;
683 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
684 recps[i].root_rid = i;
685 INIT_LIST_HEAD(&recps[i].filt_rules);
686 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
687 INIT_LIST_HEAD(&recps[i].rg_list);
688 ice_init_lock(&recps[i].filt_rule_lock);
691 hw->switch_info->recp_list = recps;
697 * ice_aq_get_sw_cfg - get switch configuration
698 * @hw: pointer to the hardware structure
699 * @buf: pointer to the result buffer
700 * @buf_size: length of the buffer available for response
701 * @req_desc: pointer to requested descriptor
702 * @num_elems: pointer to number of elements
703 * @cd: pointer to command details structure or NULL
705 * Get switch configuration (0x0200) to be placed in 'buff'.
706 * This admin command returns information such as initial VSI/port number
707 * and switch ID it belongs to.
709 * NOTE: *req_desc is both an input/output parameter.
710 * The caller of this function first calls this function with *request_desc set
711 * to 0. If the response from f/w has *req_desc set to 0, all the switch
712 * configuration information has been returned; if non-zero (meaning not all
713 * the information was returned), the caller should call this function again
714 * with *req_desc set to the previous value returned by f/w to get the
715 * next block of switch configuration information.
717 * *num_elems is output only parameter. This reflects the number of elements
718 * in response buffer. The caller of this function to use *num_elems while
719 * parsing the response buffer.
721 static enum ice_status
722 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
723 u16 buf_size, u16 *req_desc, u16 *num_elems,
724 struct ice_sq_cd *cd)
726 struct ice_aqc_get_sw_cfg *cmd;
727 enum ice_status status;
728 struct ice_aq_desc desc;
730 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
731 cmd = &desc.params.get_sw_conf;
732 cmd->element = CPU_TO_LE16(*req_desc);
734 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
736 *req_desc = LE16_TO_CPU(cmd->element);
737 *num_elems = LE16_TO_CPU(cmd->num_elems);
745 * ice_alloc_sw - allocate resources specific to switch
746 * @hw: pointer to the HW struct
747 * @ena_stats: true to turn on VEB stats
748 * @shared_res: true for shared resource, false for dedicated resource
749 * @sw_id: switch ID returned
750 * @counter_id: VEB counter ID returned
752 * allocates switch resources (SWID and VEB counter) (0x0208)
755 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
758 struct ice_aqc_alloc_free_res_elem *sw_buf;
759 struct ice_aqc_res_elem *sw_ele;
760 enum ice_status status;
763 buf_len = sizeof(*sw_buf);
764 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
765 ice_malloc(hw, buf_len);
767 return ICE_ERR_NO_MEMORY;
769 /* Prepare buffer for switch ID.
770 * The number of resource entries in buffer is passed as 1 since only a
771 * single switch/VEB instance is allocated, and hence a single sw_id
774 sw_buf->num_elems = CPU_TO_LE16(1);
776 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
777 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
778 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
780 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
781 ice_aqc_opc_alloc_res, NULL);
784 goto ice_alloc_sw_exit;
786 sw_ele = &sw_buf->elem[0];
787 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
790 /* Prepare buffer for VEB Counter */
791 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
792 struct ice_aqc_alloc_free_res_elem *counter_buf;
793 struct ice_aqc_res_elem *counter_ele;
795 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
796 ice_malloc(hw, buf_len);
798 status = ICE_ERR_NO_MEMORY;
799 goto ice_alloc_sw_exit;
802 /* The number of resource entries in buffer is passed as 1 since
803 * only a single switch/VEB instance is allocated, and hence a
804 * single VEB counter is requested.
806 counter_buf->num_elems = CPU_TO_LE16(1);
807 counter_buf->res_type =
808 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
809 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
810 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
814 ice_free(hw, counter_buf);
815 goto ice_alloc_sw_exit;
817 counter_ele = &counter_buf->elem[0];
818 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
819 ice_free(hw, counter_buf);
823 ice_free(hw, sw_buf);
828 * ice_free_sw - free resources specific to switch
829 * @hw: pointer to the HW struct
830 * @sw_id: switch ID returned
831 * @counter_id: VEB counter ID returned
833 * free switch resources (SWID and VEB counter) (0x0209)
835 * NOTE: This function frees multiple resources. It continues
836 * releasing other resources even after it encounters error.
837 * The error code returned is the last error it encountered.
839 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
841 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
842 enum ice_status status, ret_status;
845 buf_len = sizeof(*sw_buf);
846 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
847 ice_malloc(hw, buf_len);
849 return ICE_ERR_NO_MEMORY;
851 /* Prepare buffer to free for switch ID res.
852 * The number of resource entries in buffer is passed as 1 since only a
853 * single switch/VEB instance is freed, and hence a single sw_id
856 sw_buf->num_elems = CPU_TO_LE16(1);
857 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
858 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
860 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
861 ice_aqc_opc_free_res, NULL);
864 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
866 /* Prepare buffer to free for VEB Counter resource */
867 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
868 ice_malloc(hw, buf_len);
870 ice_free(hw, sw_buf);
871 return ICE_ERR_NO_MEMORY;
874 /* The number of resource entries in buffer is passed as 1 since only a
875 * single switch/VEB instance is freed, and hence a single VEB counter
878 counter_buf->num_elems = CPU_TO_LE16(1);
879 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
880 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
882 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
883 ice_aqc_opc_free_res, NULL);
885 ice_debug(hw, ICE_DBG_SW,
886 "VEB counter resource could not be freed\n");
890 ice_free(hw, counter_buf);
891 ice_free(hw, sw_buf);
897 * @hw: pointer to the HW struct
898 * @vsi_ctx: pointer to a VSI context struct
899 * @cd: pointer to command details structure or NULL
901 * Add a VSI context to the hardware (0x0210)
904 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
905 struct ice_sq_cd *cd)
907 struct ice_aqc_add_update_free_vsi_resp *res;
908 struct ice_aqc_add_get_update_free_vsi *cmd;
909 struct ice_aq_desc desc;
910 enum ice_status status;
912 cmd = &desc.params.vsi_cmd;
913 res = &desc.params.add_update_free_vsi_res;
915 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
917 if (!vsi_ctx->alloc_from_pool)
918 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
919 ICE_AQ_VSI_IS_VALID);
921 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
923 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
925 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
926 sizeof(vsi_ctx->info), cd);
929 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
930 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
931 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
939 * @hw: pointer to the HW struct
940 * @vsi_ctx: pointer to a VSI context struct
941 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
942 * @cd: pointer to command details structure or NULL
944 * Free VSI context info from hardware (0x0213)
947 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
948 bool keep_vsi_alloc, struct ice_sq_cd *cd)
950 struct ice_aqc_add_update_free_vsi_resp *resp;
951 struct ice_aqc_add_get_update_free_vsi *cmd;
952 struct ice_aq_desc desc;
953 enum ice_status status;
955 cmd = &desc.params.vsi_cmd;
956 resp = &desc.params.add_update_free_vsi_res;
958 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
960 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
962 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
964 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
966 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
967 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
975 * @hw: pointer to the HW struct
976 * @vsi_ctx: pointer to a VSI context struct
977 * @cd: pointer to command details structure or NULL
979 * Update VSI context in the hardware (0x0211)
982 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
983 struct ice_sq_cd *cd)
985 struct ice_aqc_add_update_free_vsi_resp *resp;
986 struct ice_aqc_add_get_update_free_vsi *cmd;
987 struct ice_aq_desc desc;
988 enum ice_status status;
990 cmd = &desc.params.vsi_cmd;
991 resp = &desc.params.add_update_free_vsi_res;
993 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
995 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
997 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
999 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1000 sizeof(vsi_ctx->info), cd);
1003 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1004 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1011 * ice_is_vsi_valid - check whether the VSI is valid or not
1012 * @hw: pointer to the HW struct
1013 * @vsi_handle: VSI handle
1015 * check whether the VSI is valid or not
1017 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1019 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1023 * ice_get_hw_vsi_num - return the HW VSI number
1024 * @hw: pointer to the HW struct
1025 * @vsi_handle: VSI handle
1027 * return the HW VSI number
1028 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1030 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1032 return hw->vsi_ctx[vsi_handle]->vsi_num;
1036 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1037 * @hw: pointer to the HW struct
1038 * @vsi_handle: VSI handle
1040 * return the VSI context entry for a given VSI handle
1042 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1044 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1048 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1049 * @hw: pointer to the HW struct
1050 * @vsi_handle: VSI handle
1051 * @vsi: VSI context pointer
1053 * save the VSI context entry for a given VSI handle
1056 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1058 hw->vsi_ctx[vsi_handle] = vsi;
1062 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1063 * @hw: pointer to the HW struct
1064 * @vsi_handle: VSI handle
1066 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1068 struct ice_vsi_ctx *vsi;
1071 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1074 ice_for_each_traffic_class(i) {
1075 if (vsi->lan_q_ctx[i]) {
1076 ice_free(hw, vsi->lan_q_ctx[i]);
1077 vsi->lan_q_ctx[i] = NULL;
1083 * ice_clear_vsi_ctx - clear the VSI context entry
1084 * @hw: pointer to the HW struct
1085 * @vsi_handle: VSI handle
1087 * clear the VSI context entry
1089 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1091 struct ice_vsi_ctx *vsi;
1093 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1095 ice_clear_vsi_q_ctx(hw, vsi_handle);
1097 hw->vsi_ctx[vsi_handle] = NULL;
1102 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1103 * @hw: pointer to the HW struct
1105 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1109 for (i = 0; i < ICE_MAX_VSI; i++)
1110 ice_clear_vsi_ctx(hw, i);
1114 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1115 * @hw: pointer to the HW struct
1116 * @vsi_handle: unique VSI handle provided by drivers
1117 * @vsi_ctx: pointer to a VSI context struct
1118 * @cd: pointer to command details structure or NULL
1120 * Add a VSI context to the hardware also add it into the VSI handle list.
1121 * If this function gets called after reset for existing VSIs then update
1122 * with the new HW VSI number in the corresponding VSI handle list entry.
1125 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1126 struct ice_sq_cd *cd)
1128 struct ice_vsi_ctx *tmp_vsi_ctx;
1129 enum ice_status status;
1131 if (vsi_handle >= ICE_MAX_VSI)
1132 return ICE_ERR_PARAM;
1133 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1136 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1138 /* Create a new VSI context */
1139 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1140 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1142 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1143 return ICE_ERR_NO_MEMORY;
1145 *tmp_vsi_ctx = *vsi_ctx;
1147 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1149 /* update with new HW VSI num */
1150 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1157 * ice_free_vsi- free VSI context from hardware and VSI handle list
1158 * @hw: pointer to the HW struct
1159 * @vsi_handle: unique VSI handle
1160 * @vsi_ctx: pointer to a VSI context struct
1161 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1162 * @cd: pointer to command details structure or NULL
1164 * Free VSI context info from hardware as well as from VSI handle list
1167 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1168 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1170 enum ice_status status;
1172 if (!ice_is_vsi_valid(hw, vsi_handle))
1173 return ICE_ERR_PARAM;
1174 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1175 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1177 ice_clear_vsi_ctx(hw, vsi_handle);
1183 * @hw: pointer to the HW struct
1184 * @vsi_handle: unique VSI handle
1185 * @vsi_ctx: pointer to a VSI context struct
1186 * @cd: pointer to command details structure or NULL
1188 * Update VSI context in the hardware
1191 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1192 struct ice_sq_cd *cd)
1194 if (!ice_is_vsi_valid(hw, vsi_handle))
1195 return ICE_ERR_PARAM;
1196 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1197 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1201 * ice_aq_get_vsi_params
1202 * @hw: pointer to the HW struct
1203 * @vsi_ctx: pointer to a VSI context struct
1204 * @cd: pointer to command details structure or NULL
1206 * Get VSI context info from hardware (0x0212)
1209 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1210 struct ice_sq_cd *cd)
1212 struct ice_aqc_add_get_update_free_vsi *cmd;
1213 struct ice_aqc_get_vsi_resp *resp;
1214 struct ice_aq_desc desc;
1215 enum ice_status status;
1217 cmd = &desc.params.vsi_cmd;
1218 resp = &desc.params.get_vsi_resp;
1220 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1222 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1224 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1225 sizeof(vsi_ctx->info), cd);
1227 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1229 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1230 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1237 * ice_aq_add_update_mir_rule - add/update a mirror rule
1238 * @hw: pointer to the HW struct
1239 * @rule_type: Rule Type
1240 * @dest_vsi: VSI number to which packets will be mirrored
1241 * @count: length of the list
1242 * @mr_buf: buffer for list of mirrored VSI numbers
1243 * @cd: pointer to command details structure or NULL
1246 * Add/Update Mirror Rule (0x260).
1249 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1250 u16 count, struct ice_mir_rule_buf *mr_buf,
1251 struct ice_sq_cd *cd, u16 *rule_id)
1253 struct ice_aqc_add_update_mir_rule *cmd;
1254 struct ice_aq_desc desc;
1255 enum ice_status status;
1256 __le16 *mr_list = NULL;
1259 switch (rule_type) {
1260 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1261 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1262 /* Make sure count and mr_buf are set for these rule_types */
1263 if (!(count && mr_buf))
1264 return ICE_ERR_PARAM;
1266 buf_size = count * sizeof(__le16);
1267 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1269 return ICE_ERR_NO_MEMORY;
1271 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1272 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1273 /* Make sure count and mr_buf are not set for these
1276 if (count || mr_buf)
1277 return ICE_ERR_PARAM;
1280 ice_debug(hw, ICE_DBG_SW,
1281 "Error due to unsupported rule_type %u\n", rule_type);
1282 return ICE_ERR_OUT_OF_RANGE;
1285 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1287 /* Pre-process 'mr_buf' items for add/update of virtual port
1288 * ingress/egress mirroring (but not physical port ingress/egress
1294 for (i = 0; i < count; i++) {
1297 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1299 /* Validate specified VSI number, make sure it is less
1300 * than ICE_MAX_VSI, if not return with error.
1302 if (id >= ICE_MAX_VSI) {
1303 ice_debug(hw, ICE_DBG_SW,
1304 "Error VSI index (%u) out-of-range\n",
1306 ice_free(hw, mr_list);
1307 return ICE_ERR_OUT_OF_RANGE;
1310 /* add VSI to mirror rule */
1313 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1314 else /* remove VSI from mirror rule */
1315 mr_list[i] = CPU_TO_LE16(id);
1319 cmd = &desc.params.add_update_rule;
1320 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1321 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1322 ICE_AQC_RULE_ID_VALID_M);
1323 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1324 cmd->num_entries = CPU_TO_LE16(count);
1325 cmd->dest = CPU_TO_LE16(dest_vsi);
1327 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1329 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1331 ice_free(hw, mr_list);
1337 * ice_aq_delete_mir_rule - delete a mirror rule
1338 * @hw: pointer to the HW struct
1339 * @rule_id: Mirror rule ID (to be deleted)
1340 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1341 * otherwise it is returned to the shared pool
1342 * @cd: pointer to command details structure or NULL
1344 * Delete Mirror Rule (0x261).
1347 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1348 struct ice_sq_cd *cd)
1350 struct ice_aqc_delete_mir_rule *cmd;
1351 struct ice_aq_desc desc;
1353 /* rule_id should be in the range 0...63 */
1354 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1355 return ICE_ERR_OUT_OF_RANGE;
1357 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1359 cmd = &desc.params.del_rule;
1360 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1361 cmd->rule_id = CPU_TO_LE16(rule_id);
1364 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1366 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1370 * ice_aq_alloc_free_vsi_list
1371 * @hw: pointer to the HW struct
1372 * @vsi_list_id: VSI list ID returned or used for lookup
1373 * @lkup_type: switch rule filter lookup type
1374 * @opc: switch rules population command type - pass in the command opcode
1376 * allocates or free a VSI list resource
1378 static enum ice_status
1379 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1380 enum ice_sw_lkup_type lkup_type,
1381 enum ice_adminq_opc opc)
1383 struct ice_aqc_alloc_free_res_elem *sw_buf;
1384 struct ice_aqc_res_elem *vsi_ele;
1385 enum ice_status status;
1388 buf_len = sizeof(*sw_buf);
1389 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1390 ice_malloc(hw, buf_len);
1392 return ICE_ERR_NO_MEMORY;
1393 sw_buf->num_elems = CPU_TO_LE16(1);
1395 if (lkup_type == ICE_SW_LKUP_MAC ||
1396 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1397 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1398 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1399 lkup_type == ICE_SW_LKUP_PROMISC ||
1400 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1401 lkup_type == ICE_SW_LKUP_LAST) {
1402 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1403 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1405 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1407 status = ICE_ERR_PARAM;
1408 goto ice_aq_alloc_free_vsi_list_exit;
1411 if (opc == ice_aqc_opc_free_res)
1412 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1414 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1416 goto ice_aq_alloc_free_vsi_list_exit;
1418 if (opc == ice_aqc_opc_alloc_res) {
1419 vsi_ele = &sw_buf->elem[0];
1420 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1423 ice_aq_alloc_free_vsi_list_exit:
1424 ice_free(hw, sw_buf);
1429 * ice_aq_set_storm_ctrl - Sets storm control configuration
1430 * @hw: pointer to the HW struct
1431 * @bcast_thresh: represents the upper threshold for broadcast storm control
1432 * @mcast_thresh: represents the upper threshold for multicast storm control
1433 * @ctl_bitmask: storm control control knobs
1435 * Sets the storm control configuration (0x0280)
1438 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1441 struct ice_aqc_storm_cfg *cmd;
1442 struct ice_aq_desc desc;
1444 cmd = &desc.params.storm_conf;
1446 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1448 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1449 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1450 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1452 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1456 * ice_aq_get_storm_ctrl - gets storm control configuration
1457 * @hw: pointer to the HW struct
1458 * @bcast_thresh: represents the upper threshold for broadcast storm control
1459 * @mcast_thresh: represents the upper threshold for multicast storm control
1460 * @ctl_bitmask: storm control control knobs
1462 * Gets the storm control configuration (0x0281)
1465 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1468 enum ice_status status;
1469 struct ice_aq_desc desc;
1471 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1473 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1475 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1478 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1481 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1484 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1491 * ice_aq_sw_rules - add/update/remove switch rules
1492 * @hw: pointer to the HW struct
1493 * @rule_list: pointer to switch rule population list
1494 * @rule_list_sz: total size of the rule list in bytes
1495 * @num_rules: number of switch rules in the rule_list
1496 * @opc: switch rules population command type - pass in the command opcode
1497 * @cd: pointer to command details structure or NULL
1499 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1501 static enum ice_status
1502 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1503 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1505 struct ice_aq_desc desc;
1507 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1509 if (opc != ice_aqc_opc_add_sw_rules &&
1510 opc != ice_aqc_opc_update_sw_rules &&
1511 opc != ice_aqc_opc_remove_sw_rules)
1512 return ICE_ERR_PARAM;
1514 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1516 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1517 desc.params.sw_rules.num_rules_fltr_entry_index =
1518 CPU_TO_LE16(num_rules);
1519 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1523 * ice_aq_add_recipe - add switch recipe
1524 * @hw: pointer to the HW struct
1525 * @s_recipe_list: pointer to switch rule population list
1526 * @num_recipes: number of switch recipes in the list
1527 * @cd: pointer to command details structure or NULL
1532 ice_aq_add_recipe(struct ice_hw *hw,
1533 struct ice_aqc_recipe_data_elem *s_recipe_list,
1534 u16 num_recipes, struct ice_sq_cd *cd)
1536 struct ice_aqc_add_get_recipe *cmd;
1537 struct ice_aq_desc desc;
1540 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1541 cmd = &desc.params.add_get_recipe;
1542 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1544 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1545 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1547 buf_size = num_recipes * sizeof(*s_recipe_list);
1549 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1553 * ice_aq_get_recipe - get switch recipe
1554 * @hw: pointer to the HW struct
1555 * @s_recipe_list: pointer to switch rule population list
1556 * @num_recipes: pointer to the number of recipes (input and output)
1557 * @recipe_root: root recipe number of recipe(s) to retrieve
1558 * @cd: pointer to command details structure or NULL
1562 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1563 * On output, *num_recipes will equal the number of entries returned in
1566 * The caller must supply enough space in s_recipe_list to hold all possible
1567 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1570 ice_aq_get_recipe(struct ice_hw *hw,
1571 struct ice_aqc_recipe_data_elem *s_recipe_list,
1572 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1574 struct ice_aqc_add_get_recipe *cmd;
1575 struct ice_aq_desc desc;
1576 enum ice_status status;
1579 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1580 return ICE_ERR_PARAM;
1582 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1583 cmd = &desc.params.add_get_recipe;
1584 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1586 cmd->return_index = CPU_TO_LE16(recipe_root);
1587 cmd->num_sub_recipes = 0;
1589 buf_size = *num_recipes * sizeof(*s_recipe_list);
1591 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1592 /* cppcheck-suppress constArgument */
1593 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1599 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1600 * @hw: pointer to the HW struct
1601 * @profile_id: package profile ID to associate the recipe with
1602 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1603 * @cd: pointer to command details structure or NULL
1604 * Recipe to profile association (0x0291)
1607 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1608 struct ice_sq_cd *cd)
1610 struct ice_aqc_recipe_to_profile *cmd;
1611 struct ice_aq_desc desc;
1613 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1614 cmd = &desc.params.recipe_to_profile;
1615 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1616 cmd->profile_id = CPU_TO_LE16(profile_id);
1617 /* Set the recipe ID bit in the bitmask to let the device know which
1618 * profile we are associating the recipe to
1620 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1621 ICE_NONDMA_TO_NONDMA);
1623 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1627 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1628 * @hw: pointer to the HW struct
1629 * @profile_id: package profile ID to associate the recipe with
1630 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1631 * @cd: pointer to command details structure or NULL
1632 * Associate profile ID with given recipe (0x0293)
1635 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1636 struct ice_sq_cd *cd)
1638 struct ice_aqc_recipe_to_profile *cmd;
1639 struct ice_aq_desc desc;
1640 enum ice_status status;
1642 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1643 cmd = &desc.params.recipe_to_profile;
1644 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1645 cmd->profile_id = CPU_TO_LE16(profile_id);
1647 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1649 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1650 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1656 * ice_alloc_recipe - add recipe resource
1657 * @hw: pointer to the hardware structure
1658 * @rid: recipe ID returned as response to AQ call
1660 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1662 struct ice_aqc_alloc_free_res_elem *sw_buf;
1663 enum ice_status status;
1666 buf_len = sizeof(*sw_buf);
1667 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1669 return ICE_ERR_NO_MEMORY;
1671 sw_buf->num_elems = CPU_TO_LE16(1);
1672 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1673 ICE_AQC_RES_TYPE_S) |
1674 ICE_AQC_RES_TYPE_FLAG_SHARED);
1675 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1676 ice_aqc_opc_alloc_res, NULL);
1678 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1679 ice_free(hw, sw_buf);
1684 /* ice_init_port_info - Initialize port_info with switch configuration data
1685 * @pi: pointer to port_info
1686 * @vsi_port_num: VSI number or port number
1687 * @type: Type of switch element (port or VSI)
1688 * @swid: switch ID of the switch the element is attached to
1689 * @pf_vf_num: PF or VF number
1690 * @is_vf: true if the element is a VF, false otherwise
1693 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1694 u16 swid, u16 pf_vf_num, bool is_vf)
1697 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1698 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1700 pi->pf_vf_num = pf_vf_num;
1702 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1703 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1706 ice_debug(pi->hw, ICE_DBG_SW,
1707 "incorrect VSI/port type received\n");
1712 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1713 * @hw: pointer to the hardware structure
1715 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1717 struct ice_aqc_get_sw_cfg_resp *rbuf;
1718 enum ice_status status;
1719 u16 num_total_ports;
1725 num_total_ports = 1;
1727 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1728 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1731 return ICE_ERR_NO_MEMORY;
1733 /* Multiple calls to ice_aq_get_sw_cfg may be required
1734 * to get all the switch configuration information. The need
1735 * for additional calls is indicated by ice_aq_get_sw_cfg
1736 * writing a non-zero value in req_desc
1739 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1740 &req_desc, &num_elems, NULL);
1745 for (i = 0; i < num_elems; i++) {
1746 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1747 u16 pf_vf_num, swid, vsi_port_num;
1751 ele = rbuf[i].elements;
1752 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1753 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1755 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1756 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1758 swid = LE16_TO_CPU(ele->swid);
1760 if (LE16_TO_CPU(ele->pf_vf_num) &
1761 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1764 type = LE16_TO_CPU(ele->vsi_port_num) >>
1765 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1768 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1769 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1770 if (j == num_total_ports) {
1771 ice_debug(hw, ICE_DBG_SW,
1772 "more ports than expected\n");
1773 status = ICE_ERR_CFG;
1776 ice_init_port_info(hw->port_info,
1777 vsi_port_num, type, swid,
1785 } while (req_desc && !status);
1789 ice_free(hw, (void *)rbuf);
1795 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1796 * @hw: pointer to the hardware structure
1797 * @fi: filter info structure to fill/update
1799 * This helper function populates the lb_en and lan_en elements of the provided
1800 * ice_fltr_info struct using the switch's type and characteristics of the
1801 * switch rule being configured.
1803 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1807 if ((fi->flag & ICE_FLTR_TX) &&
1808 (fi->fltr_act == ICE_FWD_TO_VSI ||
1809 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1810 fi->fltr_act == ICE_FWD_TO_Q ||
1811 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1812 /* Setting LB for prune actions will result in replicated
1813 * packets to the internal switch that will be dropped.
1815 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1818 /* Set lan_en to TRUE if
1819 * 1. The switch is a VEB AND
1821 * 2.1 The lookup is a directional lookup like ethertype,
1822 * promiscuous, ethertype-MAC, promiscuous-VLAN
1823 * and default-port OR
1824 * 2.2 The lookup is VLAN, OR
1825 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1826 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1830 * The switch is a VEPA.
1832 * In all other cases, the LAN enable has to be set to false.
1835 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1836 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1837 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1838 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1839 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1840 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1841 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1842 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1843 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1844 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1853 * ice_fill_sw_rule - Helper function to fill switch rule structure
1854 * @hw: pointer to the hardware structure
1855 * @f_info: entry containing packet forwarding information
1856 * @s_rule: switch rule structure to be filled in based on mac_entry
1857 * @opc: switch rules population command type - pass in the command opcode
1860 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1861 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1863 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1871 if (opc == ice_aqc_opc_remove_sw_rules) {
1872 s_rule->pdata.lkup_tx_rx.act = 0;
1873 s_rule->pdata.lkup_tx_rx.index =
1874 CPU_TO_LE16(f_info->fltr_rule_id);
1875 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1879 eth_hdr_sz = sizeof(dummy_eth_header);
1880 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1882 /* initialize the ether header with a dummy header */
1883 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1884 ice_fill_sw_info(hw, f_info);
1886 switch (f_info->fltr_act) {
1887 case ICE_FWD_TO_VSI:
1888 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1889 ICE_SINGLE_ACT_VSI_ID_M;
1890 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1891 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1892 ICE_SINGLE_ACT_VALID_BIT;
1894 case ICE_FWD_TO_VSI_LIST:
1895 act |= ICE_SINGLE_ACT_VSI_LIST;
1896 act |= (f_info->fwd_id.vsi_list_id <<
1897 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1898 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1899 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1900 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1901 ICE_SINGLE_ACT_VALID_BIT;
1904 act |= ICE_SINGLE_ACT_TO_Q;
1905 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1906 ICE_SINGLE_ACT_Q_INDEX_M;
1908 case ICE_DROP_PACKET:
1909 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1910 ICE_SINGLE_ACT_VALID_BIT;
1912 case ICE_FWD_TO_QGRP:
1913 q_rgn = f_info->qgrp_size > 0 ?
1914 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1915 act |= ICE_SINGLE_ACT_TO_Q;
1916 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1917 ICE_SINGLE_ACT_Q_INDEX_M;
1918 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1919 ICE_SINGLE_ACT_Q_REGION_M;
1926 act |= ICE_SINGLE_ACT_LB_ENABLE;
1928 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1930 switch (f_info->lkup_type) {
1931 case ICE_SW_LKUP_MAC:
1932 daddr = f_info->l_data.mac.mac_addr;
1934 case ICE_SW_LKUP_VLAN:
1935 vlan_id = f_info->l_data.vlan.vlan_id;
1936 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1937 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1938 act |= ICE_SINGLE_ACT_PRUNE;
1939 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1942 case ICE_SW_LKUP_ETHERTYPE_MAC:
1943 daddr = f_info->l_data.ethertype_mac.mac_addr;
1945 case ICE_SW_LKUP_ETHERTYPE:
1946 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1947 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1949 case ICE_SW_LKUP_MAC_VLAN:
1950 daddr = f_info->l_data.mac_vlan.mac_addr;
1951 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1953 case ICE_SW_LKUP_PROMISC_VLAN:
1954 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1956 case ICE_SW_LKUP_PROMISC:
1957 daddr = f_info->l_data.mac_vlan.mac_addr;
1963 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1964 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1965 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1967 /* Recipe set depending on lookup type */
1968 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1969 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1970 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1973 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1974 ICE_NONDMA_TO_NONDMA);
1976 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1977 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1978 *off = CPU_TO_BE16(vlan_id);
1981 /* Create the switch rule with the final dummy Ethernet header */
1982 if (opc != ice_aqc_opc_update_sw_rules)
1983 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1987 * ice_add_marker_act
1988 * @hw: pointer to the hardware structure
1989 * @m_ent: the management entry for which sw marker needs to be added
1990 * @sw_marker: sw marker to tag the Rx descriptor with
1991 * @l_id: large action resource ID
1993 * Create a large action to hold software marker and update the switch rule
1994 * entry pointed by m_ent with newly created large action
1996 static enum ice_status
1997 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1998 u16 sw_marker, u16 l_id)
2000 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2001 /* For software marker we need 3 large actions
2002 * 1. FWD action: FWD TO VSI or VSI LIST
2003 * 2. GENERIC VALUE action to hold the profile ID
2004 * 3. GENERIC VALUE action to hold the software marker ID
2006 const u16 num_lg_acts = 3;
2007 enum ice_status status;
2013 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2014 return ICE_ERR_PARAM;
2016 /* Create two back-to-back switch rules and submit them to the HW using
2017 * one memory buffer:
2021 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2022 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2023 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2025 return ICE_ERR_NO_MEMORY;
2027 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2029 /* Fill in the first switch rule i.e. large action */
2030 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2031 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2032 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2034 /* First action VSI forwarding or VSI list forwarding depending on how
2037 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2038 m_ent->fltr_info.fwd_id.hw_vsi_id;
2040 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2041 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2042 ICE_LG_ACT_VSI_LIST_ID_M;
2043 if (m_ent->vsi_count > 1)
2044 act |= ICE_LG_ACT_VSI_LIST;
2045 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2047 /* Second action descriptor type */
2048 act = ICE_LG_ACT_GENERIC;
2050 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2051 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2053 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2054 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2056 /* Third action Marker value */
2057 act |= ICE_LG_ACT_GENERIC;
2058 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2059 ICE_LG_ACT_GENERIC_VALUE_M;
2061 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2063 /* call the fill switch rule to fill the lookup Tx Rx structure */
2064 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2065 ice_aqc_opc_update_sw_rules);
2067 /* Update the action to point to the large action ID */
2068 rx_tx->pdata.lkup_tx_rx.act =
2069 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2070 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2071 ICE_SINGLE_ACT_PTR_VAL_M));
2073 /* Use the filter rule ID of the previously created rule with single
2074 * act. Once the update happens, hardware will treat this as large
2077 rx_tx->pdata.lkup_tx_rx.index =
2078 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2080 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2081 ice_aqc_opc_update_sw_rules, NULL);
2083 m_ent->lg_act_idx = l_id;
2084 m_ent->sw_marker_id = sw_marker;
2087 ice_free(hw, lg_act);
2092 * ice_add_counter_act - add/update filter rule with counter action
2093 * @hw: pointer to the hardware structure
2094 * @m_ent: the management entry for which counter needs to be added
2095 * @counter_id: VLAN counter ID returned as part of allocate resource
2096 * @l_id: large action resource ID
2098 static enum ice_status
2099 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2100 u16 counter_id, u16 l_id)
2102 struct ice_aqc_sw_rules_elem *lg_act;
2103 struct ice_aqc_sw_rules_elem *rx_tx;
2104 enum ice_status status;
2105 /* 2 actions will be added while adding a large action counter */
2106 const int num_acts = 2;
2113 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2114 return ICE_ERR_PARAM;
2116 /* Create two back-to-back switch rules and submit them to the HW using
2117 * one memory buffer:
2121 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2122 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2123 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2126 return ICE_ERR_NO_MEMORY;
2128 rx_tx = (struct ice_aqc_sw_rules_elem *)
2129 ((u8 *)lg_act + lg_act_size);
2131 /* Fill in the first switch rule i.e. large action */
2132 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2133 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2134 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2136 /* First action VSI forwarding or VSI list forwarding depending on how
2139 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2140 m_ent->fltr_info.fwd_id.hw_vsi_id;
2142 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2143 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2144 ICE_LG_ACT_VSI_LIST_ID_M;
2145 if (m_ent->vsi_count > 1)
2146 act |= ICE_LG_ACT_VSI_LIST;
2147 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2149 /* Second action counter ID */
2150 act = ICE_LG_ACT_STAT_COUNT;
2151 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2152 ICE_LG_ACT_STAT_COUNT_M;
2153 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2155 /* call the fill switch rule to fill the lookup Tx Rx structure */
2156 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2157 ice_aqc_opc_update_sw_rules);
2159 act = ICE_SINGLE_ACT_PTR;
2160 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2161 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2163 /* Use the filter rule ID of the previously created rule with single
2164 * act. Once the update happens, hardware will treat this as large
2167 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2168 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2170 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2171 ice_aqc_opc_update_sw_rules, NULL);
2173 m_ent->lg_act_idx = l_id;
2174 m_ent->counter_index = counter_id;
2177 ice_free(hw, lg_act);
2182 * ice_create_vsi_list_map
2183 * @hw: pointer to the hardware structure
2184 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2185 * @num_vsi: number of VSI handles in the array
2186 * @vsi_list_id: VSI list ID generated as part of allocate resource
2188 * Helper function to create a new entry of VSI list ID to VSI mapping
2189 * using the given VSI list ID
2191 static struct ice_vsi_list_map_info *
2192 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2195 struct ice_switch_info *sw = hw->switch_info;
2196 struct ice_vsi_list_map_info *v_map;
2199 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2204 v_map->vsi_list_id = vsi_list_id;
2206 for (i = 0; i < num_vsi; i++)
2207 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2209 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2214 * ice_update_vsi_list_rule
2215 * @hw: pointer to the hardware structure
2216 * @vsi_handle_arr: array of VSI handles to form a VSI list
2217 * @num_vsi: number of VSI handles in the array
2218 * @vsi_list_id: VSI list ID generated as part of allocate resource
2219 * @remove: Boolean value to indicate if this is a remove action
2220 * @opc: switch rules population command type - pass in the command opcode
2221 * @lkup_type: lookup type of the filter
2223 * Call AQ command to add a new switch rule or update existing switch rule
2224 * using the given VSI list ID
2226 static enum ice_status
2227 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2228 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2229 enum ice_sw_lkup_type lkup_type)
2231 struct ice_aqc_sw_rules_elem *s_rule;
2232 enum ice_status status;
2238 return ICE_ERR_PARAM;
2240 if (lkup_type == ICE_SW_LKUP_MAC ||
2241 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2242 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2243 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2244 lkup_type == ICE_SW_LKUP_PROMISC ||
2245 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2246 lkup_type == ICE_SW_LKUP_LAST)
2247 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2248 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2249 else if (lkup_type == ICE_SW_LKUP_VLAN)
2250 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2251 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2253 return ICE_ERR_PARAM;
2255 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2256 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2258 return ICE_ERR_NO_MEMORY;
2259 for (i = 0; i < num_vsi; i++) {
2260 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2261 status = ICE_ERR_PARAM;
2264 /* AQ call requires hw_vsi_id(s) */
2265 s_rule->pdata.vsi_list.vsi[i] =
2266 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2269 s_rule->type = CPU_TO_LE16(type);
2270 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2271 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2273 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2276 ice_free(hw, s_rule);
2281 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2282 * @hw: pointer to the HW struct
2283 * @vsi_handle_arr: array of VSI handles to form a VSI list
2284 * @num_vsi: number of VSI handles in the array
2285 * @vsi_list_id: stores the ID of the VSI list to be created
2286 * @lkup_type: switch rule filter's lookup type
2288 static enum ice_status
2289 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2290 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2292 enum ice_status status;
2294 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2295 ice_aqc_opc_alloc_res);
2299 /* Update the newly created VSI list to include the specified VSIs */
2300 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2301 *vsi_list_id, false,
2302 ice_aqc_opc_add_sw_rules, lkup_type);
2306 * ice_create_pkt_fwd_rule
2307 * @hw: pointer to the hardware structure
2308 * @f_entry: entry containing packet forwarding information
2310 * Create switch rule with given filter information and add an entry
2311 * to the corresponding filter management list to track this switch rule
2314 static enum ice_status
2315 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2316 struct ice_fltr_list_entry *f_entry)
2318 struct ice_fltr_mgmt_list_entry *fm_entry;
2319 struct ice_aqc_sw_rules_elem *s_rule;
2320 enum ice_sw_lkup_type l_type;
2321 struct ice_sw_recipe *recp;
2322 enum ice_status status;
2324 s_rule = (struct ice_aqc_sw_rules_elem *)
2325 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2327 return ICE_ERR_NO_MEMORY;
2328 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2329 ice_malloc(hw, sizeof(*fm_entry));
2331 status = ICE_ERR_NO_MEMORY;
2332 goto ice_create_pkt_fwd_rule_exit;
2335 fm_entry->fltr_info = f_entry->fltr_info;
2337 /* Initialize all the fields for the management entry */
2338 fm_entry->vsi_count = 1;
2339 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2340 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2341 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2343 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2344 ice_aqc_opc_add_sw_rules);
2346 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2347 ice_aqc_opc_add_sw_rules, NULL);
2349 ice_free(hw, fm_entry);
2350 goto ice_create_pkt_fwd_rule_exit;
2353 f_entry->fltr_info.fltr_rule_id =
2354 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2355 fm_entry->fltr_info.fltr_rule_id =
2356 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2358 /* The book keeping entries will get removed when base driver
2359 * calls remove filter AQ command
2361 l_type = fm_entry->fltr_info.lkup_type;
2362 recp = &hw->switch_info->recp_list[l_type];
2363 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2365 ice_create_pkt_fwd_rule_exit:
2366 ice_free(hw, s_rule);
2371 * ice_update_pkt_fwd_rule
2372 * @hw: pointer to the hardware structure
2373 * @f_info: filter information for switch rule
2375 * Call AQ command to update a previously created switch rule with a
2378 static enum ice_status
2379 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2381 struct ice_aqc_sw_rules_elem *s_rule;
2382 enum ice_status status;
2384 s_rule = (struct ice_aqc_sw_rules_elem *)
2385 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2387 return ICE_ERR_NO_MEMORY;
2389 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2391 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2393 /* Update switch rule with new rule set to forward VSI list */
2394 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2395 ice_aqc_opc_update_sw_rules, NULL);
2397 ice_free(hw, s_rule);
2402 * ice_update_sw_rule_bridge_mode
2403 * @hw: pointer to the HW struct
2405 * Updates unicast switch filter rules based on VEB/VEPA mode
2407 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2409 struct ice_switch_info *sw = hw->switch_info;
2410 struct ice_fltr_mgmt_list_entry *fm_entry;
2411 enum ice_status status = ICE_SUCCESS;
2412 struct LIST_HEAD_TYPE *rule_head;
2413 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2415 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2416 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2418 ice_acquire_lock(rule_lock);
2419 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2421 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2422 u8 *addr = fi->l_data.mac.mac_addr;
2424 /* Update unicast Tx rules to reflect the selected
2427 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2428 (fi->fltr_act == ICE_FWD_TO_VSI ||
2429 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2430 fi->fltr_act == ICE_FWD_TO_Q ||
2431 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2432 status = ice_update_pkt_fwd_rule(hw, fi);
2438 ice_release_lock(rule_lock);
2444 * ice_add_update_vsi_list
2445 * @hw: pointer to the hardware structure
2446 * @m_entry: pointer to current filter management list entry
2447 * @cur_fltr: filter information from the book keeping entry
2448 * @new_fltr: filter information with the new VSI to be added
2450 * Call AQ command to add or update previously created VSI list with new VSI.
2452 * Helper function to do book keeping associated with adding filter information
2453 * The algorithm to do the book keeping is described below :
2454 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2455 * if only one VSI has been added till now
2456 * Allocate a new VSI list and add two VSIs
2457 * to this list using switch rule command
2458 * Update the previously created switch rule with the
2459 * newly created VSI list ID
2460 * if a VSI list was previously created
2461 * Add the new VSI to the previously created VSI list set
2462 * using the update switch rule command
2464 static enum ice_status
2465 ice_add_update_vsi_list(struct ice_hw *hw,
2466 struct ice_fltr_mgmt_list_entry *m_entry,
2467 struct ice_fltr_info *cur_fltr,
2468 struct ice_fltr_info *new_fltr)
2470 enum ice_status status = ICE_SUCCESS;
2471 u16 vsi_list_id = 0;
2473 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2474 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2475 return ICE_ERR_NOT_IMPL;
2477 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2478 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2479 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2480 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2481 return ICE_ERR_NOT_IMPL;
2483 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2484 /* Only one entry existed in the mapping and it was not already
2485 * a part of a VSI list. So, create a VSI list with the old and
2488 struct ice_fltr_info tmp_fltr;
2489 u16 vsi_handle_arr[2];
2491 /* A rule already exists with the new VSI being added */
2492 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2493 return ICE_ERR_ALREADY_EXISTS;
2495 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2496 vsi_handle_arr[1] = new_fltr->vsi_handle;
2497 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2499 new_fltr->lkup_type);
2503 tmp_fltr = *new_fltr;
2504 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2505 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2506 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2507 /* Update the previous switch rule of "MAC forward to VSI" to
2508 * "MAC fwd to VSI list"
2510 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2514 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2515 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2516 m_entry->vsi_list_info =
2517 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2520 /* If this entry was large action then the large action needs
2521 * to be updated to point to FWD to VSI list
2523 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2525 ice_add_marker_act(hw, m_entry,
2526 m_entry->sw_marker_id,
2527 m_entry->lg_act_idx);
2529 u16 vsi_handle = new_fltr->vsi_handle;
2530 enum ice_adminq_opc opcode;
2532 if (!m_entry->vsi_list_info)
2535 /* A rule already exists with the new VSI being added */
2536 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2539 /* Update the previously created VSI list set with
2540 * the new VSI ID passed in
2542 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2543 opcode = ice_aqc_opc_update_sw_rules;
2545 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2546 vsi_list_id, false, opcode,
2547 new_fltr->lkup_type);
2548 /* update VSI list mapping info with new VSI ID */
2550 ice_set_bit(vsi_handle,
2551 m_entry->vsi_list_info->vsi_map);
2554 m_entry->vsi_count++;
2559 * ice_find_rule_entry - Search a rule entry
2560 * @hw: pointer to the hardware structure
2561 * @recp_id: lookup type for which the specified rule needs to be searched
2562 * @f_info: rule information
2564 * Helper function to search for a given rule entry
2565 * Returns pointer to entry storing the rule if found
2567 static struct ice_fltr_mgmt_list_entry *
2568 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2570 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2571 struct ice_switch_info *sw = hw->switch_info;
2572 struct LIST_HEAD_TYPE *list_head;
2574 list_head = &sw->recp_list[recp_id].filt_rules;
2575 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2577 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2578 sizeof(f_info->l_data)) &&
2579 f_info->flag == list_itr->fltr_info.flag) {
2588 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2589 * @hw: pointer to the hardware structure
2590 * @recp_id: lookup type for which VSI lists needs to be searched
2591 * @vsi_handle: VSI handle to be found in VSI list
2592 * @vsi_list_id: VSI list ID found containing vsi_handle
2594 * Helper function to search a VSI list with single entry containing given VSI
2595 * handle element. This can be extended further to search VSI list with more
2596 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2598 static struct ice_vsi_list_map_info *
2599 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2602 struct ice_vsi_list_map_info *map_info = NULL;
2603 struct ice_switch_info *sw = hw->switch_info;
2604 struct LIST_HEAD_TYPE *list_head;
2606 list_head = &sw->recp_list[recp_id].filt_rules;
2607 if (sw->recp_list[recp_id].adv_rule) {
2608 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2610 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2611 ice_adv_fltr_mgmt_list_entry,
2613 if (list_itr->vsi_list_info) {
2614 map_info = list_itr->vsi_list_info;
2615 if (ice_is_bit_set(map_info->vsi_map,
2617 *vsi_list_id = map_info->vsi_list_id;
2623 struct ice_fltr_mgmt_list_entry *list_itr;
2625 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2626 ice_fltr_mgmt_list_entry,
2628 if (list_itr->vsi_count == 1 &&
2629 list_itr->vsi_list_info) {
2630 map_info = list_itr->vsi_list_info;
2631 if (ice_is_bit_set(map_info->vsi_map,
2633 *vsi_list_id = map_info->vsi_list_id;
2643 * ice_add_rule_internal - add rule for a given lookup type
2644 * @hw: pointer to the hardware structure
2645 * @recp_id: lookup type (recipe ID) for which rule has to be added
2646 * @f_entry: structure containing MAC forwarding information
2648 * Adds or updates the rule lists for a given recipe
2650 static enum ice_status
2651 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2652 struct ice_fltr_list_entry *f_entry)
2654 struct ice_switch_info *sw = hw->switch_info;
2655 struct ice_fltr_info *new_fltr, *cur_fltr;
2656 struct ice_fltr_mgmt_list_entry *m_entry;
2657 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2658 enum ice_status status = ICE_SUCCESS;
2660 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2661 return ICE_ERR_PARAM;
2663 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2664 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2665 f_entry->fltr_info.fwd_id.hw_vsi_id =
2666 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2668 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2670 ice_acquire_lock(rule_lock);
2671 new_fltr = &f_entry->fltr_info;
2672 if (new_fltr->flag & ICE_FLTR_RX)
2673 new_fltr->src = hw->port_info->lport;
2674 else if (new_fltr->flag & ICE_FLTR_TX)
2676 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2678 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2680 status = ice_create_pkt_fwd_rule(hw, f_entry);
2681 goto exit_add_rule_internal;
2684 cur_fltr = &m_entry->fltr_info;
2685 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2687 exit_add_rule_internal:
2688 ice_release_lock(rule_lock);
2693 * ice_remove_vsi_list_rule
2694 * @hw: pointer to the hardware structure
2695 * @vsi_list_id: VSI list ID generated as part of allocate resource
2696 * @lkup_type: switch rule filter lookup type
2698 * The VSI list should be emptied before this function is called to remove the
2701 static enum ice_status
2702 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2703 enum ice_sw_lkup_type lkup_type)
2705 struct ice_aqc_sw_rules_elem *s_rule;
2706 enum ice_status status;
2709 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2710 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2712 return ICE_ERR_NO_MEMORY;
2714 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2715 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2717 /* Free the vsi_list resource that we allocated. It is assumed that the
2718 * list is empty at this point.
2720 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2721 ice_aqc_opc_free_res);
2723 ice_free(hw, s_rule);
2728 * ice_rem_update_vsi_list
2729 * @hw: pointer to the hardware structure
2730 * @vsi_handle: VSI handle of the VSI to remove
2731 * @fm_list: filter management entry for which the VSI list management needs to
2734 static enum ice_status
2735 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2736 struct ice_fltr_mgmt_list_entry *fm_list)
2738 enum ice_sw_lkup_type lkup_type;
2739 enum ice_status status = ICE_SUCCESS;
2742 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2743 fm_list->vsi_count == 0)
2744 return ICE_ERR_PARAM;
2746 /* A rule with the VSI being removed does not exist */
2747 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2748 return ICE_ERR_DOES_NOT_EXIST;
2750 lkup_type = fm_list->fltr_info.lkup_type;
2751 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2752 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2753 ice_aqc_opc_update_sw_rules,
2758 fm_list->vsi_count--;
2759 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2761 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2762 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2763 struct ice_vsi_list_map_info *vsi_list_info =
2764 fm_list->vsi_list_info;
2767 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2769 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2770 return ICE_ERR_OUT_OF_RANGE;
2772 /* Make sure VSI list is empty before removing it below */
2773 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2775 ice_aqc_opc_update_sw_rules,
2780 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2781 tmp_fltr_info.fwd_id.hw_vsi_id =
2782 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2783 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2784 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2786 ice_debug(hw, ICE_DBG_SW,
2787 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2788 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2792 fm_list->fltr_info = tmp_fltr_info;
2795 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2796 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2797 struct ice_vsi_list_map_info *vsi_list_info =
2798 fm_list->vsi_list_info;
2800 /* Remove the VSI list since it is no longer used */
2801 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2803 ice_debug(hw, ICE_DBG_SW,
2804 "Failed to remove VSI list %d, error %d\n",
2805 vsi_list_id, status);
2809 LIST_DEL(&vsi_list_info->list_entry);
2810 ice_free(hw, vsi_list_info);
2811 fm_list->vsi_list_info = NULL;
2818 * ice_remove_rule_internal - Remove a filter rule of a given type
2820 * @hw: pointer to the hardware structure
2821 * @recp_id: recipe ID for which the rule needs to removed
2822 * @f_entry: rule entry containing filter information
2824 static enum ice_status
2825 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2826 struct ice_fltr_list_entry *f_entry)
2828 struct ice_switch_info *sw = hw->switch_info;
2829 struct ice_fltr_mgmt_list_entry *list_elem;
2830 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2831 enum ice_status status = ICE_SUCCESS;
2832 bool remove_rule = false;
2835 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2836 return ICE_ERR_PARAM;
2837 f_entry->fltr_info.fwd_id.hw_vsi_id =
2838 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2840 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2841 ice_acquire_lock(rule_lock);
2842 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2844 status = ICE_ERR_DOES_NOT_EXIST;
2848 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2850 } else if (!list_elem->vsi_list_info) {
2851 status = ICE_ERR_DOES_NOT_EXIST;
2853 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2854 /* a ref_cnt > 1 indicates that the vsi_list is being
2855 * shared by multiple rules. Decrement the ref_cnt and
2856 * remove this rule, but do not modify the list, as it
2857 * is in-use by other rules.
2859 list_elem->vsi_list_info->ref_cnt--;
2862 /* a ref_cnt of 1 indicates the vsi_list is only used
2863 * by one rule. However, the original removal request is only
2864 * for a single VSI. Update the vsi_list first, and only
2865 * remove the rule if there are no further VSIs in this list.
2867 vsi_handle = f_entry->fltr_info.vsi_handle;
2868 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2871 /* if VSI count goes to zero after updating the VSI list */
2872 if (list_elem->vsi_count == 0)
2877 /* Remove the lookup rule */
2878 struct ice_aqc_sw_rules_elem *s_rule;
2880 s_rule = (struct ice_aqc_sw_rules_elem *)
2881 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2883 status = ICE_ERR_NO_MEMORY;
2887 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2888 ice_aqc_opc_remove_sw_rules);
2890 status = ice_aq_sw_rules(hw, s_rule,
2891 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2892 ice_aqc_opc_remove_sw_rules, NULL);
2894 /* Remove a book keeping from the list */
2895 ice_free(hw, s_rule);
2900 LIST_DEL(&list_elem->list_entry);
2901 ice_free(hw, list_elem);
2904 ice_release_lock(rule_lock);
2909 * ice_aq_get_res_alloc - get allocated resources
2910 * @hw: pointer to the HW struct
2911 * @num_entries: pointer to u16 to store the number of resource entries returned
2912 * @buf: pointer to user-supplied buffer
2913 * @buf_size: size of buff
2914 * @cd: pointer to command details structure or NULL
2916 * The user-supplied buffer must be large enough to store the resource
2917 * information for all resource types. Each resource type is an
2918 * ice_aqc_get_res_resp_data_elem structure.
2921 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2922 u16 buf_size, struct ice_sq_cd *cd)
2924 struct ice_aqc_get_res_alloc *resp;
2925 enum ice_status status;
2926 struct ice_aq_desc desc;
2929 return ICE_ERR_BAD_PTR;
2931 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2932 return ICE_ERR_INVAL_SIZE;
2934 resp = &desc.params.get_res;
2936 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2937 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2939 if (!status && num_entries)
2940 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2946 * ice_aq_get_res_descs - get allocated resource descriptors
2947 * @hw: pointer to the hardware structure
2948 * @num_entries: number of resource entries in buffer
2949 * @buf: Indirect buffer to hold data parameters and response
2950 * @buf_size: size of buffer for indirect commands
2951 * @res_type: resource type
2952 * @res_shared: is resource shared
2953 * @desc_id: input - first desc ID to start; output - next desc ID
2954 * @cd: pointer to command details structure or NULL
2957 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2958 struct ice_aqc_get_allocd_res_desc_resp *buf,
2959 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2960 struct ice_sq_cd *cd)
2962 struct ice_aqc_get_allocd_res_desc *cmd;
2963 struct ice_aq_desc desc;
2964 enum ice_status status;
2966 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2968 cmd = &desc.params.get_res_desc;
2971 return ICE_ERR_PARAM;
2973 if (buf_size != (num_entries * sizeof(*buf)))
2974 return ICE_ERR_PARAM;
2976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2978 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2979 ICE_AQC_RES_TYPE_M) | (res_shared ?
2980 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2981 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2983 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2985 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2991 * ice_add_mac - Add a MAC address based filter rule
2992 * @hw: pointer to the hardware structure
2993 * @m_list: list of MAC addresses and forwarding information
2995 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2996 * multiple unicast addresses, the function assumes that all the
2997 * addresses are unique in a given add_mac call. It doesn't
2998 * check for duplicates in this case, removing duplicates from a given
2999 * list should be taken care of in the caller of this function.
3002 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3004 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3005 struct ice_fltr_list_entry *m_list_itr;
3006 struct LIST_HEAD_TYPE *rule_head;
3007 u16 elem_sent, total_elem_left;
3008 struct ice_switch_info *sw;
3009 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3010 enum ice_status status = ICE_SUCCESS;
3011 u16 num_unicast = 0;
3015 return ICE_ERR_PARAM;
3017 sw = hw->switch_info;
3018 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3019 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3021 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3025 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3026 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3027 if (!ice_is_vsi_valid(hw, vsi_handle))
3028 return ICE_ERR_PARAM;
3029 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3030 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3031 /* update the src in case it is VSI num */
3032 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3033 return ICE_ERR_PARAM;
3034 m_list_itr->fltr_info.src = hw_vsi_id;
3035 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3036 IS_ZERO_ETHER_ADDR(add))
3037 return ICE_ERR_PARAM;
3038 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3039 /* Don't overwrite the unicast address */
3040 ice_acquire_lock(rule_lock);
3041 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3042 &m_list_itr->fltr_info)) {
3043 ice_release_lock(rule_lock);
3044 return ICE_ERR_ALREADY_EXISTS;
3046 ice_release_lock(rule_lock);
3048 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3049 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3050 m_list_itr->status =
3051 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3053 if (m_list_itr->status)
3054 return m_list_itr->status;
3058 ice_acquire_lock(rule_lock);
3059 /* Exit if no suitable entries were found for adding bulk switch rule */
3061 status = ICE_SUCCESS;
3062 goto ice_add_mac_exit;
3065 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3067 /* Allocate switch rule buffer for the bulk update for unicast */
3068 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3069 s_rule = (struct ice_aqc_sw_rules_elem *)
3070 ice_calloc(hw, num_unicast, s_rule_size);
3072 status = ICE_ERR_NO_MEMORY;
3073 goto ice_add_mac_exit;
3077 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3079 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3080 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3082 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3083 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3084 ice_aqc_opc_add_sw_rules);
3085 r_iter = (struct ice_aqc_sw_rules_elem *)
3086 ((u8 *)r_iter + s_rule_size);
3090 /* Call AQ bulk switch rule update for all unicast addresses */
3092 /* Call AQ switch rule in AQ_MAX chunk */
3093 for (total_elem_left = num_unicast; total_elem_left > 0;
3094 total_elem_left -= elem_sent) {
3095 struct ice_aqc_sw_rules_elem *entry = r_iter;
3097 elem_sent = min(total_elem_left,
3098 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3099 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3100 elem_sent, ice_aqc_opc_add_sw_rules,
3103 goto ice_add_mac_exit;
3104 r_iter = (struct ice_aqc_sw_rules_elem *)
3105 ((u8 *)r_iter + (elem_sent * s_rule_size));
3108 /* Fill up rule ID based on the value returned from FW */
3110 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3112 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3113 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3114 struct ice_fltr_mgmt_list_entry *fm_entry;
3116 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3117 f_info->fltr_rule_id =
3118 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3119 f_info->fltr_act = ICE_FWD_TO_VSI;
3120 /* Create an entry to track this MAC address */
3121 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3122 ice_malloc(hw, sizeof(*fm_entry));
3124 status = ICE_ERR_NO_MEMORY;
3125 goto ice_add_mac_exit;
3127 fm_entry->fltr_info = *f_info;
3128 fm_entry->vsi_count = 1;
3129 /* The book keeping entries will get removed when
3130 * base driver calls remove filter AQ command
3133 LIST_ADD(&fm_entry->list_entry, rule_head);
3134 r_iter = (struct ice_aqc_sw_rules_elem *)
3135 ((u8 *)r_iter + s_rule_size);
3140 ice_release_lock(rule_lock);
3142 ice_free(hw, s_rule);
3147 * ice_add_vlan_internal - Add one VLAN based filter rule
3148 * @hw: pointer to the hardware structure
3149 * @f_entry: filter entry containing one VLAN information
3151 static enum ice_status
3152 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3154 struct ice_switch_info *sw = hw->switch_info;
3155 struct ice_fltr_mgmt_list_entry *v_list_itr;
3156 struct ice_fltr_info *new_fltr, *cur_fltr;
3157 enum ice_sw_lkup_type lkup_type;
3158 u16 vsi_list_id = 0, vsi_handle;
3159 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3160 enum ice_status status = ICE_SUCCESS;
3162 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3163 return ICE_ERR_PARAM;
3165 f_entry->fltr_info.fwd_id.hw_vsi_id =
3166 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3167 new_fltr = &f_entry->fltr_info;
3169 /* VLAN ID should only be 12 bits */
3170 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3171 return ICE_ERR_PARAM;
3173 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3174 return ICE_ERR_PARAM;
3176 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3177 lkup_type = new_fltr->lkup_type;
3178 vsi_handle = new_fltr->vsi_handle;
3179 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3180 ice_acquire_lock(rule_lock);
3181 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3183 struct ice_vsi_list_map_info *map_info = NULL;
3185 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3186 /* All VLAN pruning rules use a VSI list. Check if
3187 * there is already a VSI list containing VSI that we
3188 * want to add. If found, use the same vsi_list_id for
3189 * this new VLAN rule or else create a new list.
3191 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3195 status = ice_create_vsi_list_rule(hw,
3203 /* Convert the action to forwarding to a VSI list. */
3204 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3205 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3208 status = ice_create_pkt_fwd_rule(hw, f_entry);
3210 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3213 status = ICE_ERR_DOES_NOT_EXIST;
3216 /* reuse VSI list for new rule and increment ref_cnt */
3218 v_list_itr->vsi_list_info = map_info;
3219 map_info->ref_cnt++;
3221 v_list_itr->vsi_list_info =
3222 ice_create_vsi_list_map(hw, &vsi_handle,
3226 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3227 /* Update existing VSI list to add new VSI ID only if it used
3230 cur_fltr = &v_list_itr->fltr_info;
3231 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3234 /* If VLAN rule exists and VSI list being used by this rule is
3235 * referenced by more than 1 VLAN rule. Then create a new VSI
3236 * list appending previous VSI with new VSI and update existing
3237 * VLAN rule to point to new VSI list ID
3239 struct ice_fltr_info tmp_fltr;
3240 u16 vsi_handle_arr[2];
3243 /* Current implementation only supports reusing VSI list with
3244 * one VSI count. We should never hit below condition
3246 if (v_list_itr->vsi_count > 1 &&
3247 v_list_itr->vsi_list_info->ref_cnt > 1) {
3248 ice_debug(hw, ICE_DBG_SW,
3249 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3250 status = ICE_ERR_CFG;
3255 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3258 /* A rule already exists with the new VSI being added */
3259 if (cur_handle == vsi_handle) {
3260 status = ICE_ERR_ALREADY_EXISTS;
3264 vsi_handle_arr[0] = cur_handle;
3265 vsi_handle_arr[1] = vsi_handle;
3266 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3267 &vsi_list_id, lkup_type);
3271 tmp_fltr = v_list_itr->fltr_info;
3272 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3273 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3274 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3275 /* Update the previous switch rule to a new VSI list which
3276 * includes current VSI that is requested
3278 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3282 /* before overriding VSI list map info. decrement ref_cnt of
3285 v_list_itr->vsi_list_info->ref_cnt--;
3287 /* now update to newly created list */
3288 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3289 v_list_itr->vsi_list_info =
3290 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3292 v_list_itr->vsi_count++;
3296 ice_release_lock(rule_lock);
3301 * ice_add_vlan - Add VLAN based filter rule
3302 * @hw: pointer to the hardware structure
3303 * @v_list: list of VLAN entries and forwarding information
3306 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3308 struct ice_fltr_list_entry *v_list_itr;
3311 return ICE_ERR_PARAM;
3313 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3315 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3316 return ICE_ERR_PARAM;
3317 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3318 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3319 if (v_list_itr->status)
3320 return v_list_itr->status;
3326 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3327 * @hw: pointer to the hardware structure
3328 * @mv_list: list of MAC and VLAN filters
3330 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3331 * pruning bits enabled, then it is the responsibility of the caller to make
3332 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3333 * VLAN won't be received on that VSI otherwise.
3336 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3338 struct ice_fltr_list_entry *mv_list_itr;
3340 if (!mv_list || !hw)
3341 return ICE_ERR_PARAM;
3343 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3345 enum ice_sw_lkup_type l_type =
3346 mv_list_itr->fltr_info.lkup_type;
3348 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3349 return ICE_ERR_PARAM;
3350 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3351 mv_list_itr->status =
3352 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3354 if (mv_list_itr->status)
3355 return mv_list_itr->status;
3361 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3362 * @hw: pointer to the hardware structure
3363 * @em_list: list of ether type MAC filter, MAC is optional
3365 * This function requires the caller to populate the entries in
3366 * the filter list with the necessary fields (including flags to
3367 * indicate Tx or Rx rules).
3370 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3372 struct ice_fltr_list_entry *em_list_itr;
3374 if (!em_list || !hw)
3375 return ICE_ERR_PARAM;
3377 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3379 enum ice_sw_lkup_type l_type =
3380 em_list_itr->fltr_info.lkup_type;
3382 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3383 l_type != ICE_SW_LKUP_ETHERTYPE)
3384 return ICE_ERR_PARAM;
3386 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3388 if (em_list_itr->status)
3389 return em_list_itr->status;
3395 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3396 * @hw: pointer to the hardware structure
3397 * @em_list: list of ethertype or ethertype MAC entries
3400 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3402 struct ice_fltr_list_entry *em_list_itr, *tmp;
3404 if (!em_list || !hw)
3405 return ICE_ERR_PARAM;
3407 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3409 enum ice_sw_lkup_type l_type =
3410 em_list_itr->fltr_info.lkup_type;
3412 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3413 l_type != ICE_SW_LKUP_ETHERTYPE)
3414 return ICE_ERR_PARAM;
3416 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3418 if (em_list_itr->status)
3419 return em_list_itr->status;
3426 * ice_rem_sw_rule_info
3427 * @hw: pointer to the hardware structure
3428 * @rule_head: pointer to the switch list structure that we want to delete
3431 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3433 if (!LIST_EMPTY(rule_head)) {
3434 struct ice_fltr_mgmt_list_entry *entry;
3435 struct ice_fltr_mgmt_list_entry *tmp;
3437 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3438 ice_fltr_mgmt_list_entry, list_entry) {
3439 LIST_DEL(&entry->list_entry);
3440 ice_free(hw, entry);
3446 * ice_rem_adv_rule_info
3447 * @hw: pointer to the hardware structure
3448 * @rule_head: pointer to the switch list structure that we want to delete
3451 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3453 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3454 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3456 if (LIST_EMPTY(rule_head))
3459 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3460 ice_adv_fltr_mgmt_list_entry, list_entry) {
3461 LIST_DEL(&lst_itr->list_entry);
3462 ice_free(hw, lst_itr->lkups);
3463 ice_free(hw, lst_itr);
3468 * ice_rem_all_sw_rules_info
3469 * @hw: pointer to the hardware structure
3471 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3473 struct ice_switch_info *sw = hw->switch_info;
3476 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3477 struct LIST_HEAD_TYPE *rule_head;
3479 rule_head = &sw->recp_list[i].filt_rules;
3480 if (!sw->recp_list[i].adv_rule)
3481 ice_rem_sw_rule_info(hw, rule_head);
3483 ice_rem_adv_rule_info(hw, rule_head);
3488 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3489 * @pi: pointer to the port_info structure
3490 * @vsi_handle: VSI handle to set as default
3491 * @set: true to add the above mentioned switch rule, false to remove it
3492 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3494 * add filter rule to set/unset given VSI as default VSI for the switch
3495 * (represented by swid)
3498 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3501 struct ice_aqc_sw_rules_elem *s_rule;
3502 struct ice_fltr_info f_info;
3503 struct ice_hw *hw = pi->hw;
3504 enum ice_adminq_opc opcode;
3505 enum ice_status status;
3509 if (!ice_is_vsi_valid(hw, vsi_handle))
3510 return ICE_ERR_PARAM;
3511 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3513 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3514 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3515 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3517 return ICE_ERR_NO_MEMORY;
3519 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3521 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3522 f_info.flag = direction;
3523 f_info.fltr_act = ICE_FWD_TO_VSI;
3524 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3526 if (f_info.flag & ICE_FLTR_RX) {
3527 f_info.src = pi->lport;
3528 f_info.src_id = ICE_SRC_ID_LPORT;
3530 f_info.fltr_rule_id =
3531 pi->dflt_rx_vsi_rule_id;
3532 } else if (f_info.flag & ICE_FLTR_TX) {
3533 f_info.src_id = ICE_SRC_ID_VSI;
3534 f_info.src = hw_vsi_id;
3536 f_info.fltr_rule_id =
3537 pi->dflt_tx_vsi_rule_id;
3541 opcode = ice_aqc_opc_add_sw_rules;
3543 opcode = ice_aqc_opc_remove_sw_rules;
3545 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3547 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3548 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3551 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3553 if (f_info.flag & ICE_FLTR_TX) {
3554 pi->dflt_tx_vsi_num = hw_vsi_id;
3555 pi->dflt_tx_vsi_rule_id = index;
3556 } else if (f_info.flag & ICE_FLTR_RX) {
3557 pi->dflt_rx_vsi_num = hw_vsi_id;
3558 pi->dflt_rx_vsi_rule_id = index;
3561 if (f_info.flag & ICE_FLTR_TX) {
3562 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3563 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3564 } else if (f_info.flag & ICE_FLTR_RX) {
3565 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3566 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3571 ice_free(hw, s_rule);
3576 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3577 * @hw: pointer to the hardware structure
3578 * @recp_id: lookup type for which the specified rule needs to be searched
3579 * @f_info: rule information
3581 * Helper function to search for a unicast rule entry - this is to be used
3582 * to remove unicast MAC filter that is not shared with other VSIs on the
3585 * Returns pointer to entry storing the rule if found
3587 static struct ice_fltr_mgmt_list_entry *
3588 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3589 struct ice_fltr_info *f_info)
3591 struct ice_switch_info *sw = hw->switch_info;
3592 struct ice_fltr_mgmt_list_entry *list_itr;
3593 struct LIST_HEAD_TYPE *list_head;
3595 list_head = &sw->recp_list[recp_id].filt_rules;
3596 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3598 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3599 sizeof(f_info->l_data)) &&
3600 f_info->fwd_id.hw_vsi_id ==
3601 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3602 f_info->flag == list_itr->fltr_info.flag)
3609 * ice_remove_mac - remove a MAC address based filter rule
3610 * @hw: pointer to the hardware structure
3611 * @m_list: list of MAC addresses and forwarding information
3613 * This function removes either a MAC filter rule or a specific VSI from a
3614 * VSI list for a multicast MAC address.
3616 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3617 * ice_add_mac. Caller should be aware that this call will only work if all
3618 * the entries passed into m_list were added previously. It will not attempt to
3619 * do a partial remove of entries that were found.
3622 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3624 struct ice_fltr_list_entry *list_itr, *tmp;
3625 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3628 return ICE_ERR_PARAM;
3630 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3631 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3633 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3634 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3637 if (l_type != ICE_SW_LKUP_MAC)
3638 return ICE_ERR_PARAM;
3640 vsi_handle = list_itr->fltr_info.vsi_handle;
3641 if (!ice_is_vsi_valid(hw, vsi_handle))
3642 return ICE_ERR_PARAM;
3644 list_itr->fltr_info.fwd_id.hw_vsi_id =
3645 ice_get_hw_vsi_num(hw, vsi_handle);
3646 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3647 /* Don't remove the unicast address that belongs to
3648 * another VSI on the switch, since it is not being
3651 ice_acquire_lock(rule_lock);
3652 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3653 &list_itr->fltr_info)) {
3654 ice_release_lock(rule_lock);
3655 return ICE_ERR_DOES_NOT_EXIST;
3657 ice_release_lock(rule_lock);
3659 list_itr->status = ice_remove_rule_internal(hw,
3662 if (list_itr->status)
3663 return list_itr->status;
3669 * ice_remove_vlan - Remove VLAN based filter rule
3670 * @hw: pointer to the hardware structure
3671 * @v_list: list of VLAN entries and forwarding information
3674 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3676 struct ice_fltr_list_entry *v_list_itr, *tmp;
3679 return ICE_ERR_PARAM;
3681 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3683 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3685 if (l_type != ICE_SW_LKUP_VLAN)
3686 return ICE_ERR_PARAM;
3687 v_list_itr->status = ice_remove_rule_internal(hw,
3690 if (v_list_itr->status)
3691 return v_list_itr->status;
3697 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3698 * @hw: pointer to the hardware structure
3699 * @v_list: list of MAC VLAN entries and forwarding information
3702 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3704 struct ice_fltr_list_entry *v_list_itr, *tmp;
3707 return ICE_ERR_PARAM;
3709 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3711 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3713 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3714 return ICE_ERR_PARAM;
3715 v_list_itr->status =
3716 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3718 if (v_list_itr->status)
3719 return v_list_itr->status;
3725 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3726 * @fm_entry: filter entry to inspect
3727 * @vsi_handle: VSI handle to compare with filter info
3730 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3732 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3733 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3734 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3735 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3740 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3741 * @hw: pointer to the hardware structure
3742 * @vsi_handle: VSI handle to remove filters from
3743 * @vsi_list_head: pointer to the list to add entry to
3744 * @fi: pointer to fltr_info of filter entry to copy & add
3746 * Helper function, used when creating a list of filters to remove from
3747 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3748 * original filter entry, with the exception of fltr_info.fltr_act and
3749 * fltr_info.fwd_id fields. These are set such that later logic can
3750 * extract which VSI to remove the fltr from, and pass on that information.
3752 static enum ice_status
3753 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3754 struct LIST_HEAD_TYPE *vsi_list_head,
3755 struct ice_fltr_info *fi)
3757 struct ice_fltr_list_entry *tmp;
3759 /* this memory is freed up in the caller function
3760 * once filters for this VSI are removed
3762 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3764 return ICE_ERR_NO_MEMORY;
3766 tmp->fltr_info = *fi;
3768 /* Overwrite these fields to indicate which VSI to remove filter from,
3769 * so find and remove logic can extract the information from the
3770 * list entries. Note that original entries will still have proper
3773 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3774 tmp->fltr_info.vsi_handle = vsi_handle;
3775 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3777 LIST_ADD(&tmp->list_entry, vsi_list_head);
3783 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3784 * @hw: pointer to the hardware structure
3785 * @vsi_handle: VSI handle to remove filters from
3786 * @lkup_list_head: pointer to the list that has certain lookup type filters
3787 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3789 * Locates all filters in lkup_list_head that are used by the given VSI,
3790 * and adds COPIES of those entries to vsi_list_head (intended to be used
3791 * to remove the listed filters).
3792 * Note that this means all entries in vsi_list_head must be explicitly
3793 * deallocated by the caller when done with list.
3795 static enum ice_status
3796 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3797 struct LIST_HEAD_TYPE *lkup_list_head,
3798 struct LIST_HEAD_TYPE *vsi_list_head)
3800 struct ice_fltr_mgmt_list_entry *fm_entry;
3801 enum ice_status status = ICE_SUCCESS;
3803 /* check to make sure VSI ID is valid and within boundary */
3804 if (!ice_is_vsi_valid(hw, vsi_handle))
3805 return ICE_ERR_PARAM;
3807 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3808 ice_fltr_mgmt_list_entry, list_entry) {
3809 struct ice_fltr_info *fi;
3811 fi = &fm_entry->fltr_info;
3812 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3815 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3825 * ice_determine_promisc_mask
3826 * @fi: filter info to parse
3828 * Helper function to determine which ICE_PROMISC_ mask corresponds
3829 * to given filter into.
3831 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3833 u16 vid = fi->l_data.mac_vlan.vlan_id;
3834 u8 *macaddr = fi->l_data.mac.mac_addr;
3835 bool is_tx_fltr = false;
3836 u8 promisc_mask = 0;
3838 if (fi->flag == ICE_FLTR_TX)
3841 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3842 promisc_mask |= is_tx_fltr ?
3843 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3844 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3845 promisc_mask |= is_tx_fltr ?
3846 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3847 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3848 promisc_mask |= is_tx_fltr ?
3849 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3851 promisc_mask |= is_tx_fltr ?
3852 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3854 return promisc_mask;
3858 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3859 * @hw: pointer to the hardware structure
3860 * @vsi_handle: VSI handle to retrieve info from
3861 * @promisc_mask: pointer to mask to be filled in
3862 * @vid: VLAN ID of promisc VLAN VSI
3865 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3868 struct ice_switch_info *sw = hw->switch_info;
3869 struct ice_fltr_mgmt_list_entry *itr;
3870 struct LIST_HEAD_TYPE *rule_head;
3871 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3873 if (!ice_is_vsi_valid(hw, vsi_handle))
3874 return ICE_ERR_PARAM;
3878 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3879 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3881 ice_acquire_lock(rule_lock);
3882 LIST_FOR_EACH_ENTRY(itr, rule_head,
3883 ice_fltr_mgmt_list_entry, list_entry) {
3884 /* Continue if this filter doesn't apply to this VSI or the
3885 * VSI ID is not in the VSI map for this filter
3887 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3890 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3892 ice_release_lock(rule_lock);
3898 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3899 * @hw: pointer to the hardware structure
3900 * @vsi_handle: VSI handle to retrieve info from
3901 * @promisc_mask: pointer to mask to be filled in
3902 * @vid: VLAN ID of promisc VLAN VSI
3905 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3908 struct ice_switch_info *sw = hw->switch_info;
3909 struct ice_fltr_mgmt_list_entry *itr;
3910 struct LIST_HEAD_TYPE *rule_head;
3911 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3913 if (!ice_is_vsi_valid(hw, vsi_handle))
3914 return ICE_ERR_PARAM;
3918 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3919 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3921 ice_acquire_lock(rule_lock);
3922 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3924 /* Continue if this filter doesn't apply to this VSI or the
3925 * VSI ID is not in the VSI map for this filter
3927 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3930 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3932 ice_release_lock(rule_lock);
3938 * ice_remove_promisc - Remove promisc based filter rules
3939 * @hw: pointer to the hardware structure
3940 * @recp_id: recipe ID for which the rule needs to removed
3941 * @v_list: list of promisc entries
3943 static enum ice_status
3944 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3945 struct LIST_HEAD_TYPE *v_list)
3947 struct ice_fltr_list_entry *v_list_itr, *tmp;
3949 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3951 v_list_itr->status =
3952 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3953 if (v_list_itr->status)
3954 return v_list_itr->status;
3960 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3961 * @hw: pointer to the hardware structure
3962 * @vsi_handle: VSI handle to clear mode
3963 * @promisc_mask: mask of promiscuous config bits to clear
3964 * @vid: VLAN ID to clear VLAN promiscuous
3967 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3970 struct ice_switch_info *sw = hw->switch_info;
3971 struct ice_fltr_list_entry *fm_entry, *tmp;
3972 struct LIST_HEAD_TYPE remove_list_head;
3973 struct ice_fltr_mgmt_list_entry *itr;
3974 struct LIST_HEAD_TYPE *rule_head;
3975 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3976 enum ice_status status = ICE_SUCCESS;
3979 if (!ice_is_vsi_valid(hw, vsi_handle))
3980 return ICE_ERR_PARAM;
3982 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3983 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3985 recipe_id = ICE_SW_LKUP_PROMISC;
3987 rule_head = &sw->recp_list[recipe_id].filt_rules;
3988 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3990 INIT_LIST_HEAD(&remove_list_head);
3992 ice_acquire_lock(rule_lock);
3993 LIST_FOR_EACH_ENTRY(itr, rule_head,
3994 ice_fltr_mgmt_list_entry, list_entry) {
3995 struct ice_fltr_info *fltr_info;
3996 u8 fltr_promisc_mask = 0;
3998 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4000 fltr_info = &itr->fltr_info;
4002 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4003 vid != fltr_info->l_data.mac_vlan.vlan_id)
4006 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4008 /* Skip if filter is not completely specified by given mask */
4009 if (fltr_promisc_mask & ~promisc_mask)
4012 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4016 ice_release_lock(rule_lock);
4017 goto free_fltr_list;
4020 ice_release_lock(rule_lock);
4022 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4025 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4026 ice_fltr_list_entry, list_entry) {
4027 LIST_DEL(&fm_entry->list_entry);
4028 ice_free(hw, fm_entry);
4035 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4036 * @hw: pointer to the hardware structure
4037 * @vsi_handle: VSI handle to configure
4038 * @promisc_mask: mask of promiscuous config bits
4039 * @vid: VLAN ID to set VLAN promiscuous
4042 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4044 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4045 struct ice_fltr_list_entry f_list_entry;
4046 struct ice_fltr_info new_fltr;
4047 enum ice_status status = ICE_SUCCESS;
4053 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4055 if (!ice_is_vsi_valid(hw, vsi_handle))
4056 return ICE_ERR_PARAM;
4057 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4059 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4061 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4062 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4063 new_fltr.l_data.mac_vlan.vlan_id = vid;
4064 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4066 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4067 recipe_id = ICE_SW_LKUP_PROMISC;
4070 /* Separate filters must be set for each direction/packet type
4071 * combination, so we will loop over the mask value, store the
4072 * individual type, and clear it out in the input mask as it
4075 while (promisc_mask) {
4081 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4082 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4083 pkt_type = UCAST_FLTR;
4084 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4085 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4086 pkt_type = UCAST_FLTR;
4088 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4089 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4090 pkt_type = MCAST_FLTR;
4091 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4092 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4093 pkt_type = MCAST_FLTR;
4095 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4096 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4097 pkt_type = BCAST_FLTR;
4098 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4099 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4100 pkt_type = BCAST_FLTR;
4104 /* Check for VLAN promiscuous flag */
4105 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4106 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4107 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4108 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4112 /* Set filter DA based on packet type */
4113 mac_addr = new_fltr.l_data.mac.mac_addr;
4114 if (pkt_type == BCAST_FLTR) {
4115 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4116 } else if (pkt_type == MCAST_FLTR ||
4117 pkt_type == UCAST_FLTR) {
4118 /* Use the dummy ether header DA */
4119 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4120 ICE_NONDMA_TO_NONDMA);
4121 if (pkt_type == MCAST_FLTR)
4122 mac_addr[0] |= 0x1; /* Set multicast bit */
4125 /* Need to reset this to zero for all iterations */
4128 new_fltr.flag |= ICE_FLTR_TX;
4129 new_fltr.src = hw_vsi_id;
4131 new_fltr.flag |= ICE_FLTR_RX;
4132 new_fltr.src = hw->port_info->lport;
4135 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4136 new_fltr.vsi_handle = vsi_handle;
4137 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4138 f_list_entry.fltr_info = new_fltr;
4140 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4141 if (status != ICE_SUCCESS)
4142 goto set_promisc_exit;
4150 * ice_set_vlan_vsi_promisc
4151 * @hw: pointer to the hardware structure
4152 * @vsi_handle: VSI handle to configure
4153 * @promisc_mask: mask of promiscuous config bits
4154 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4156 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4159 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4160 bool rm_vlan_promisc)
4162 struct ice_switch_info *sw = hw->switch_info;
4163 struct ice_fltr_list_entry *list_itr, *tmp;
4164 struct LIST_HEAD_TYPE vsi_list_head;
4165 struct LIST_HEAD_TYPE *vlan_head;
4166 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4167 enum ice_status status;
4170 INIT_LIST_HEAD(&vsi_list_head);
4171 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4172 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4173 ice_acquire_lock(vlan_lock);
4174 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4176 ice_release_lock(vlan_lock);
4178 goto free_fltr_list;
4180 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4182 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4183 if (rm_vlan_promisc)
4184 status = ice_clear_vsi_promisc(hw, vsi_handle,
4185 promisc_mask, vlan_id);
4187 status = ice_set_vsi_promisc(hw, vsi_handle,
4188 promisc_mask, vlan_id);
4194 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4195 ice_fltr_list_entry, list_entry) {
4196 LIST_DEL(&list_itr->list_entry);
4197 ice_free(hw, list_itr);
4203 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4204 * @hw: pointer to the hardware structure
4205 * @vsi_handle: VSI handle to remove filters from
4206 * @lkup: switch rule filter lookup type
4209 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4210 enum ice_sw_lkup_type lkup)
4212 struct ice_switch_info *sw = hw->switch_info;
4213 struct ice_fltr_list_entry *fm_entry;
4214 struct LIST_HEAD_TYPE remove_list_head;
4215 struct LIST_HEAD_TYPE *rule_head;
4216 struct ice_fltr_list_entry *tmp;
4217 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4218 enum ice_status status;
4220 INIT_LIST_HEAD(&remove_list_head);
4221 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4222 rule_head = &sw->recp_list[lkup].filt_rules;
4223 ice_acquire_lock(rule_lock);
4224 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4226 ice_release_lock(rule_lock);
4231 case ICE_SW_LKUP_MAC:
4232 ice_remove_mac(hw, &remove_list_head);
4234 case ICE_SW_LKUP_VLAN:
4235 ice_remove_vlan(hw, &remove_list_head);
4237 case ICE_SW_LKUP_PROMISC:
4238 case ICE_SW_LKUP_PROMISC_VLAN:
4239 ice_remove_promisc(hw, lkup, &remove_list_head);
4241 case ICE_SW_LKUP_MAC_VLAN:
4242 ice_remove_mac_vlan(hw, &remove_list_head);
4244 case ICE_SW_LKUP_ETHERTYPE:
4245 case ICE_SW_LKUP_ETHERTYPE_MAC:
4246 ice_remove_eth_mac(hw, &remove_list_head);
4248 case ICE_SW_LKUP_DFLT:
4249 ice_debug(hw, ICE_DBG_SW,
4250 "Remove filters for this lookup type hasn't been implemented yet\n");
4252 case ICE_SW_LKUP_LAST:
4253 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4257 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4258 ice_fltr_list_entry, list_entry) {
4259 LIST_DEL(&fm_entry->list_entry);
4260 ice_free(hw, fm_entry);
4265 * ice_remove_vsi_fltr - Remove all filters for a VSI
4266 * @hw: pointer to the hardware structure
4267 * @vsi_handle: VSI handle to remove filters from
4269 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4271 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4273 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4274 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4275 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4276 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4277 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4278 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4279 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4280 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4284 * ice_alloc_res_cntr - allocating resource counter
4285 * @hw: pointer to the hardware structure
4286 * @type: type of resource
4287 * @alloc_shared: if set it is shared else dedicated
4288 * @num_items: number of entries requested for FD resource type
4289 * @counter_id: counter index returned by AQ call
4292 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4295 struct ice_aqc_alloc_free_res_elem *buf;
4296 enum ice_status status;
4299 /* Allocate resource */
4300 buf_len = sizeof(*buf);
4301 buf = (struct ice_aqc_alloc_free_res_elem *)
4302 ice_malloc(hw, buf_len);
4304 return ICE_ERR_NO_MEMORY;
4306 buf->num_elems = CPU_TO_LE16(num_items);
4307 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4308 ICE_AQC_RES_TYPE_M) | alloc_shared);
4310 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4311 ice_aqc_opc_alloc_res, NULL);
4315 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4323 * ice_free_res_cntr - free resource counter
4324 * @hw: pointer to the hardware structure
4325 * @type: type of resource
4326 * @alloc_shared: if set it is shared else dedicated
4327 * @num_items: number of entries to be freed for FD resource type
4328 * @counter_id: counter ID resource which needs to be freed
4331 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4334 struct ice_aqc_alloc_free_res_elem *buf;
4335 enum ice_status status;
4339 buf_len = sizeof(*buf);
4340 buf = (struct ice_aqc_alloc_free_res_elem *)
4341 ice_malloc(hw, buf_len);
4343 return ICE_ERR_NO_MEMORY;
4345 buf->num_elems = CPU_TO_LE16(num_items);
4346 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4347 ICE_AQC_RES_TYPE_M) | alloc_shared);
4348 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4350 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4351 ice_aqc_opc_free_res, NULL);
4353 ice_debug(hw, ICE_DBG_SW,
4354 "counter resource could not be freed\n");
4361 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4362 * @hw: pointer to the hardware structure
4363 * @counter_id: returns counter index
4365 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4367 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4368 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4373 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4374 * @hw: pointer to the hardware structure
4375 * @counter_id: counter index to be freed
4377 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4379 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4380 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4385 * ice_alloc_res_lg_act - add large action resource
4386 * @hw: pointer to the hardware structure
4387 * @l_id: large action ID to fill it in
4388 * @num_acts: number of actions to hold with a large action entry
4390 static enum ice_status
4391 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4393 struct ice_aqc_alloc_free_res_elem *sw_buf;
4394 enum ice_status status;
4397 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4398 return ICE_ERR_PARAM;
4400 /* Allocate resource for large action */
4401 buf_len = sizeof(*sw_buf);
4402 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4403 ice_malloc(hw, buf_len);
4405 return ICE_ERR_NO_MEMORY;
4407 sw_buf->num_elems = CPU_TO_LE16(1);
4409 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4410 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4411 * If num_acts is greater than 2, then use
4412 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4413 * The num_acts cannot exceed 4. This was ensured at the
4414 * beginning of the function.
4417 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4418 else if (num_acts == 2)
4419 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4421 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4423 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4424 ice_aqc_opc_alloc_res, NULL);
4426 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4428 ice_free(hw, sw_buf);
4433 * ice_add_mac_with_sw_marker - add filter with sw marker
4434 * @hw: pointer to the hardware structure
4435 * @f_info: filter info structure containing the MAC filter information
4436 * @sw_marker: sw marker to tag the Rx descriptor with
4439 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4442 struct ice_switch_info *sw = hw->switch_info;
4443 struct ice_fltr_mgmt_list_entry *m_entry;
4444 struct ice_fltr_list_entry fl_info;
4445 struct LIST_HEAD_TYPE l_head;
4446 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4447 enum ice_status ret;
4451 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4452 return ICE_ERR_PARAM;
4454 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4455 return ICE_ERR_PARAM;
4457 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4458 return ICE_ERR_PARAM;
4460 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4461 return ICE_ERR_PARAM;
4462 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4464 /* Add filter if it doesn't exist so then the adding of large
4465 * action always results in update
4468 INIT_LIST_HEAD(&l_head);
4469 fl_info.fltr_info = *f_info;
4470 LIST_ADD(&fl_info.list_entry, &l_head);
4472 entry_exists = false;
4473 ret = ice_add_mac(hw, &l_head);
4474 if (ret == ICE_ERR_ALREADY_EXISTS)
4475 entry_exists = true;
4479 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4480 ice_acquire_lock(rule_lock);
4481 /* Get the book keeping entry for the filter */
4482 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4486 /* If counter action was enabled for this rule then don't enable
4487 * sw marker large action
4489 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4490 ret = ICE_ERR_PARAM;
4494 /* if same marker was added before */
4495 if (m_entry->sw_marker_id == sw_marker) {
4496 ret = ICE_ERR_ALREADY_EXISTS;
4500 /* Allocate a hardware table entry to hold large act. Three actions
4501 * for marker based large action
4503 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4507 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4510 /* Update the switch rule to add the marker action */
4511 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4513 ice_release_lock(rule_lock);
4518 ice_release_lock(rule_lock);
4519 /* only remove entry if it did not exist previously */
4521 ret = ice_remove_mac(hw, &l_head);
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; intermediate lines (braces, gotos, some declarations) are missing.
 */
4527  * ice_add_mac_with_counter - add filter with counter enabled
4528  * @hw: pointer to the hardware structure
4529  * @f_info: pointer to filter info structure containing the MAC filter
/* Adds the MAC filter (if not already present), then attaches a counter-based
 * large action to its bookkeeping entry. Returns an ice_status code.
 */
4533 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4535 struct ice_switch_info *sw = hw->switch_info;
4536 struct ice_fltr_mgmt_list_entry *m_entry;
4537 struct ice_fltr_list_entry fl_info;
4538 struct LIST_HEAD_TYPE l_head;
4539 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4540 enum ice_status ret;
/* Only forward-to-VSI MAC filters on a valid VSI handle are accepted. */
4545 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4546 return ICE_ERR_PARAM;
4548 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4549 return ICE_ERR_PARAM;
4551 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4552 return ICE_ERR_PARAM;
4553 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
/* 'entry_exist' tracks whether the filter pre-existed; its declaration is in
 * an elided line — presumably a local bool, TODO confirm against full source.
 */
4555 entry_exist = false;
4557 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4559 /* Add filter if it doesn't exist so then the adding of large
4560 * action always results in update
4562 INIT_LIST_HEAD(&l_head);
4564 fl_info.fltr_info = *f_info;
4565 LIST_ADD(&fl_info.list_entry, &l_head);
4567 ret = ice_add_mac(hw, &l_head);
4568 if (ret == ICE_ERR_ALREADY_EXISTS)
/* Bookkeeping lookup and all entry mutations happen under the MAC recipe's
 * filter-rule lock acquired here.
 */
4573 ice_acquire_lock(rule_lock);
4574 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4576 ret = ICE_ERR_BAD_PTR;
4580 /* Don't enable counter for a filter for which sw marker was enabled */
4581 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4582 ret = ICE_ERR_PARAM;
4586 /* If a counter was already enabled then don't need to add again */
4587 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4588 ret = ICE_ERR_ALREADY_EXISTS;
4592 /* Allocate a hardware table entry to VLAN counter */
4593 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4597 /* Allocate a hardware table entry to hold large act. Two actions for
4598 * counter based large action
4600 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4604 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4607 /* Update the switch rule to add the counter action */
4608 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4610 ice_release_lock(rule_lock);
/* Error/exit path: release the lock, then undo the ice_add_mac() above —
 * per the comment below, only when the filter did not pre-exist.
 */
4615 ice_release_lock(rule_lock);
4616 /* only remove entry if it did not exist previously */
4618 ret = ice_remove_mac(hw, &l_head);
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the table's closing "};" is in an elided line.
 */
4623 /* This is mapping table entry that maps every word within a given protocol
4624 * structure to the real byte offset as per the specification of that
4626 * for example dst address is 3 words in ethertype header and corresponding
4627 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4628 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4629 * matching entry describing its field. This needs to be updated if new
4630 * structure is added to that union.
4632 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4633 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4634 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4635 { ICE_ETYPE_OL, { 0 } },
4636 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4637 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4638 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4639 26, 28, 30, 32, 34, 36, 38 } },
4640 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4641 26, 28, 30, 32, 34, 36, 38 } },
4642 { ICE_TCP_IL, { 0, 2 } },
4643 { ICE_UDP_OF, { 0, 2 } },
4644 { ICE_UDP_ILOS, { 0, 2 } },
4645 { ICE_SCTP_IL, { 0, 2 } },
/* Tunnel entries start at byte offset 8 — presumably skipping the preceding
 * 8-byte UDP header for UDP-based tunnels; TODO confirm with the header defs.
 */
4646 { ICE_VXLAN, { 8, 10, 12, 14 } },
4647 { ICE_GENEVE, { 8, 10, 12, 14 } },
4648 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4649 { ICE_NVGRE, { 0, 2, 4, 6 } },
4650 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4651 { ICE_PPPOE, { 0, 2, 4, 6 } },
/* Sentinel terminator (see the iteration pattern in ice_prot_type_to_id). */
4652 { ICE_PROTOCOL_LAST, { 0 } }
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the table's closing "};" is in an elided line.
 */
4655 /* The following table describes preferred grouping of recipes.
4656 * If a recipe that needs to be programmed is a superset or matches one of the
4657 * following combinations, then the recipe needs to be chained as per the
/* Maps each software ice_protocol_type to the hardware protocol ID used in
 * field vectors. Note all UDP-based tunnels map onto the same HW protocol
 * (ICE_UDP_OF_HW).
 */
4661 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4662 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4663 { ICE_MAC_IL, ICE_MAC_IL_HW },
4664 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4665 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4666 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4667 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4668 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4669 { ICE_TCP_IL, ICE_TCP_IL_HW },
4670 { ICE_UDP_OF, ICE_UDP_OF_HW },
4671 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4672 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4673 { ICE_VXLAN, ICE_UDP_OF_HW },
4674 { ICE_GENEVE, ICE_UDP_OF_HW },
4675 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4676 { ICE_NVGRE, ICE_GRE_OF_HW },
4677 { ICE_GTP, ICE_UDP_OF_HW },
4678 { ICE_PPPOE, ICE_PPPOE_HW },
/* Sentinel terminator used by the lookup loop in ice_prot_type_to_id. */
4679 { ICE_PROTOCOL_LAST, 0 }
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; loop bodies, 'found' handling and some braces are in elided lines.
 */
4683 * ice_find_recp - find a recipe
4684 * @hw: pointer to the hardware structure
4685 * @lkup_exts: extension sequence to match
4687 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4689 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
/* 'refresh_required' is presumably passed to ice_get_recp_frm_fw() (its call
 * arguments are elided) to force one FW refresh — TODO confirm.
 */
4691 bool refresh_required = true;
4692 struct ice_sw_recipe *recp;
4695 /* Walk through existing recipes to find a match */
4696 recp = hw->switch_info->recp_list;
4697 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4698 /* If recipe was not created for this ID, in SW bookkeeping,
4699 * check if FW has an entry for this recipe. If the FW has an
4700 * entry update it in our SW bookkeeping and continue with the
4703 if (!recp[i].recp_created)
4704 if (ice_get_recp_frm_fw(hw,
4705 hw->switch_info->recp_list, i,
4709 /* if number of words we are looking for match */
4710 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4711 struct ice_fv_word *a = lkup_exts->fv_words;
4712 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive set comparison: every requested (prot_id, off) word
 * must appear somewhere in the candidate recipe's word list (O(p*q)).
 */
4716 for (p = 0; p < lkup_exts->n_val_words; p++) {
4717 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4719 if (a[p].off == b[q].off &&
4720 a[p].prot_id == b[q].prot_id)
4721 /* Found the "p"th word in the
4726 /* After walking through all the words in the
4727 * "i"th recipe if "p"th word was not found then
4728 * this recipe is not what we are looking for.
4729 * So break out from this loop and try the next
4732 if (q >= recp[i].lkup_exts.n_val_words) {
4737 /* If for "i"th recipe the found was never set to false
4738 * then it means we found our match
4741 return i; /* Return the recipe ID */
4744 return ICE_MAX_NUM_RECIPES;
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the "return true/false" lines and closing braces are elided.
 */
4748 * ice_prot_type_to_id - get protocol ID from protocol type
4749 * @type: protocol type
4750 * @id: pointer to variable that will receive the ID
4752 * Returns true if found, false otherwise
/* Linear scan of ice_prot_id_tbl, terminated by the ICE_PROTOCOL_LAST
 * sentinel; writes the HW protocol ID through @id on a hit.
 */
4754 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4758 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4759 if (ice_prot_id_tbl[i].type == type) {
4760 *id = ice_prot_id_tbl[i].protocol_id;
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers. Also: the doc header says "ice_find_valid_words" but the function
 * is ice_fill_valid_words — the comment name is stale.
 */
4767 * ice_find_valid_words - count valid words
4768 * @rule: advanced rule with lookup information
4769 * @lkup_exts: byte offset extractions of the words that are valid
4771 * calculate valid words in a lookup rule using mask value
4774 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4775 struct ice_prot_lkup_ext *lkup_exts)
/* prot_id is only used here to validate that the type maps to a HW protocol;
 * the assignment below indexes ice_prot_id_tbl directly by rule->type, which
 * relies on the table's order matching enum ice_protocol_type — TODO confirm.
 */
4781 if (!ice_prot_type_to_id(rule->type, &prot_id))
4784 word = lkup_exts->n_val_words;
/* Walk the rule's mask 16-bit word by word; each nonzero mask word becomes
 * one field-vector word (offset from ice_prot_ext, mask recorded verbatim).
 */
4786 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4787 if (((u16 *)&rule->m_u)[j] &&
4788 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4789 /* No more space to accommodate */
4790 if (word >= ICE_MAX_CHAIN_WORDS)
4792 lkup_exts->fv_words[word].off =
4793 ice_prot_ext[rule->type].offs[j];
4794 lkup_exts->fv_words[word].prot_id =
4795 ice_prot_id_tbl[rule->type].protocol_id;
4796 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return value is the number of words this rule contributed. */
4800 ret_val = word - lkup_exts->n_val_words;
4801 lkup_exts->n_val_words = word;
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the initial group allocation, counters and closing braces are in
 * elided lines.
 */
4809 * ice_create_first_fit_recp_def - Create a recipe grouping
4810 * @hw: pointer to the hardware structure
4811 * @lkup_exts: an array of protocol header extractions
4812 * @rg_list: pointer to a list that stores new recipe groups
4813 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4815 * Using first fit algorithm, take all the words that are still not done
4816 * and start grouping them in 4-word groups. Each group makes up one
4819 static enum ice_status
4820 ice_create_first_fit_recp_def(struct ice_hw *hw,
4821 struct ice_prot_lkup_ext *lkup_exts,
4822 struct LIST_HEAD_TYPE *rg_list,
4825 struct ice_pref_recipe_group *grp = NULL;
4830 /* Walk through every word in the rule to check if it is not done. If so
4831 * then this word needs to be part of a new recipe.
4833 for (j = 0; j < lkup_exts->n_val_words; j++)
4834 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists or the current one is full
 * (ICE_NUM_WORDS_RECIPE pairs); entries are heap-allocated and chained
 * onto rg_list — ownership transfers to the caller's list.
 */
4836 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4837 struct ice_recp_grp_entry *entry;
4839 entry = (struct ice_recp_grp_entry *)
4840 ice_malloc(hw, sizeof(*entry));
4842 return ICE_ERR_NO_MEMORY;
4843 LIST_ADD(&entry->l_entry, rg_list);
4844 grp = &entry->r_group;
/* Copy the word's protocol/offset pair and mask into the current group. */
4848 grp->pairs[grp->n_val_pairs].prot_id =
4849 lkup_exts->fv_words[j].prot_id;
4850 grp->pairs[grp->n_val_pairs].off =
4851 lkup_exts->fv_words[j].off;
4852 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the fv_idx store, loop break and closing braces are elided.
 */
4860 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4861 * @hw: pointer to the hardware structure
4862 * @fv_list: field vector with the extraction sequence information
4863 * @rg_list: recipe groupings with protocol-offset pairs
4865 * Helper function to fill in the field vector indices for protocol-offset
4866 * pairs. These indexes are then ultimately programmed into a recipe.
4868 static enum ice_status
4869 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4870 struct LIST_HEAD_TYPE *rg_list)
4872 struct ice_sw_fv_list_entry *fv;
4873 struct ice_recp_grp_entry *rg;
4874 struct ice_fv_word *fv_ext;
4876 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; all groups are
 * resolved against its extraction words.
 */
4879 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4880 fv_ext = fv->fv_ptr->ew;
4882 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4885 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4886 struct ice_fv_word *pr;
4891 pr = &rg->r_group.pairs[i];
4892 mask = rg->r_group.mask[i];
4894 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4895 if (fv_ext[j].prot_id == pr->prot_id &&
4896 fv_ext[j].off == pr->off) {
4899 /* Store index of field vector */
4901 /* Mask is given by caller as big
4902 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask for firmware consumption. */
4905 rg->fv_mask[i] = mask << 8 | mask >> 8;
4909 /* Protocol/offset could not be found, caller gave an
4913 return ICE_ERR_PARAM;
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the counter increments and the function's return line are elided
 * (the doc below says it returns the number of free indexes).
 */
4921 * ice_find_free_recp_res_idx - find free result indexes for recipe
4922 * @hw: pointer to hardware structure
4923 * @profiles: bitmap of profiles that will be associated with the new recipe
4924 * @free_idx: pointer to variable to receive the free index bitmap
4926 * The algorithm used here is:
4927 * 1. When creating a new recipe, create a set P which contains all
4928 * Profiles that will be associated with our new recipe
4930 * 2. For each Profile p in set P:
4931 * a. Add all recipes associated with Profile p into set R
4932 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4933 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4934 * i. Or just assume they all have the same possible indexes:
4936 * i.e., PossibleIndexes = 0x0000F00000000000
4938 * 3. For each Recipe r in set R:
4939 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4940 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4942 * FreeIndexes will contain the bits indicating the indexes free for use,
4943 * then the code needs to update the recipe[r].used_result_idx_bits to
4944 * indicate which indexes were selected for use by this recipe.
4947 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4948 ice_bitmap_t *free_idx)
4950 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4951 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4952 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4956 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
4957 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4958 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4959 ice_init_possible_res_bm(possible_idx);
/* Restrict candidates to the fixed ICE_POSSIBLE_RES_IDX mask. */
4961 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
4962 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
4963 ice_set_bit(bit, possible_idx)
4965 /* For each profile we are going to associate the recipe with, add the
4966 * recipes that are associated with that profile. This will give us
4967 * the set of recipes that our recipe may collide with.
4970 while (ICE_MAX_NUM_PROFILES >
4971 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4972 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4973 ICE_MAX_NUM_RECIPES);
4978 /* For each recipe that our new recipe may collide with, determine
4979 * which indexes have been used.
4981 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4982 if (ice_is_bit_set(recipes, bit))
4983 ice_or_bitmap(used_idx, used_idx,
4984 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used — valid because used is a subset of possible. */
4987 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4989 /* return number of free indexes */
4991 while (ICE_MAX_FV_WORDS >
4992 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; many intermediate lines (declarations such as recps/rid/chain_idx,
 * error gotos, closing braces, cleanup labels) are missing.
 */
5001 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5002 * @hw: pointer to hardware structure
5003 * @rm: recipe management list entry
5004 * @match_tun: if field vector index for tunnel needs to be programmed
5005 * @profiles: bitmap of profiles that will be assocated.
5007 static enum ice_status
5008 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5009 bool match_tun, ice_bitmap_t *profiles)
5011 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5012 struct ice_aqc_recipe_data_elem *tmp;
5013 struct ice_aqc_recipe_data_elem *buf;
5014 struct ice_recp_grp_entry *entry;
5015 enum ice_status status;
5021 /* When more than one recipe are required, another recipe is needed to
5022 * chain them together. Matching a tunnel metadata ID takes up one of
5023 * the match fields in the chaining recipe reducing the number of
5024 * chained recipes by one.
5026 /* check number of free result indices */
5027 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5028 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
/* A chained (multi-group) recipe needs one free result index per group. */
5030 if (rm->n_grp_count > 1) {
5031 if (rm->n_grp_count > free_res_idx)
5032 return ICE_ERR_MAX_LIMIT;
/* tmp receives the current FW recipe set; buf is the set we will program. */
5037 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5038 ICE_MAX_NUM_RECIPES,
5041 return ICE_ERR_NO_MEMORY;
5043 buf = (struct ice_aqc_recipe_data_elem *)
5044 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5046 status = ICE_ERR_NO_MEMORY;
5050 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5051 recipe_count = ICE_MAX_NUM_RECIPES;
5052 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5054 if (status || recipe_count == 0)
5057 /* Allocate the recipe resources, and configure them according to the
5058 * match fields from protocol headers and extracted field vectors.
5060 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5061 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5064 status = ice_alloc_recipe(hw, &entry->rid);
5068 /* Clear the result index of the located recipe, as this will be
5069 * updated, if needed, later in the recipe creation process.
5071 tmp[0].content.result_indx = 0;
/* Seed the new recipe element from the FW template, then override. */
5073 buf[recps] = tmp[0];
5074 buf[recps].recipe_indx = (u8)entry->rid;
5075 /* if the recipe is a non-root recipe RID should be programmed
5076 * as 0 for the rules to be applied correctly.
5078 buf[recps].content.rid = 0;
5079 ice_memset(&buf[recps].content.lkup_indx, 0,
5080 sizeof(buf[recps].content.lkup_indx),
5083 /* All recipes use look-up index 0 to match switch ID. */
5084 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5085 buf[recps].content.mask[0] =
5086 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5087 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5090 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5091 buf[recps].content.lkup_indx[i] = 0x80;
5092 buf[recps].content.mask[i] = 0;
/* Fill the group's field-vector indexes/masks into lookup slots 1..n. */
5095 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5096 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5097 buf[recps].content.mask[i + 1] =
5098 CPU_TO_LE16(entry->fv_mask[i]);
5101 if (rm->n_grp_count > 1) {
5102 /* Checks to see if there really is a valid result index
5105 if (chain_idx >= ICE_MAX_FV_WORDS) {
5106 ice_debug(hw, ICE_DBG_SW,
5107 "No chain index available\n");
5108 status = ICE_ERR_MAX_LIMIT;
/* Consume one result index for this group and advance to the next. */
5112 entry->chain_idx = chain_idx;
5113 buf[recps].content.result_indx =
5114 ICE_AQ_RECIPE_RESULT_EN |
5115 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5116 ICE_AQ_RECIPE_RESULT_DATA_M);
5117 ice_clear_bit(chain_idx, result_idx_bm);
5118 chain_idx = ice_find_first_bit(result_idx_bm,
5122 /* fill recipe dependencies */
5123 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5124 ICE_MAX_NUM_RECIPES);
5125 ice_set_bit(buf[recps].recipe_indx,
5126 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5127 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group recipe: the one element is itself the root. */
5131 if (rm->n_grp_count == 1) {
5132 rm->root_rid = buf[0].recipe_indx;
5133 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5134 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5135 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5136 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5137 sizeof(buf[0].recipe_bitmap),
5138 ICE_NONDMA_TO_NONDMA);
5140 status = ICE_ERR_BAD_PTR;
5143 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5144 * the recipe which is getting created if specified
5145 * by user. Usually any advanced switch filter, which results
5146 * into new extraction sequence, ended up creating a new recipe
5147 * of type ROOT and usually recipes are associated with profiles
5148 * Switch rule referreing newly created recipe, needs to have
5149 * either/or 'fwd' or 'join' priority, otherwise switch rule
5150 * evaluation will not happen correctly. In other words, if
5151 * switch rule to be evaluated on priority basis, then recipe
5152 * needs to have priority, otherwise it will be evaluated last.
5154 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group recipe: allocate and build one extra root recipe that
 * matches the chain indexes produced by the other groups.
 */
5156 struct ice_recp_grp_entry *last_chain_entry;
5159 /* Allocate the last recipe that will chain the outcomes of the
5160 * other recipes together
5162 status = ice_alloc_recipe(hw, &rid);
5166 buf[recps].recipe_indx = (u8)rid;
5167 buf[recps].content.rid = (u8)rid;
5168 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5169 /* the new entry created should also be part of rg_list to
5170 * make sure we have complete recipe
5172 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5173 sizeof(*last_chain_entry));
5174 if (!last_chain_entry) {
5175 status = ICE_ERR_NO_MEMORY;
5178 last_chain_entry->rid = rid;
5179 ice_memset(&buf[recps].content.lkup_indx, 0,
5180 sizeof(buf[recps].content.lkup_indx),
5182 /* All recipes use look-up index 0 to match switch ID. */
5183 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5184 buf[recps].content.mask[0] =
5185 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5186 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5187 buf[recps].content.lkup_indx[i] =
5188 ICE_AQ_RECIPE_LKUP_IGNORE;
5189 buf[recps].content.mask[i] = 0;
5193 /* update r_bitmap with the recp that is used for chaining */
5194 ice_set_bit(rid, rm->r_bitmap);
5195 /* this is the recipe that chains all the other recipes so it
5196 * should not have a chaining ID to indicate the same
5198 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5199 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5201 last_chain_entry->fv_idx[i] = entry->chain_idx;
5202 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5203 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5204 ice_set_bit(entry->rid, rm->r_bitmap);
5206 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5207 if (sizeof(buf[recps].recipe_bitmap) >=
5208 sizeof(rm->r_bitmap)) {
5209 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5210 sizeof(buf[recps].recipe_bitmap),
5211 ICE_NONDMA_TO_NONDMA);
5213 status = ICE_ERR_BAD_PTR;
5216 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5218 /* To differentiate among different UDP tunnels, a meta data ID
/* Presumably guarded by 'match_tun' in an elided line — TODO confirm. */
5222 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5223 buf[recps].content.mask[i] =
5224 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5228 rm->root_rid = (u8)rid;
/* Program all assembled recipe elements to FW under the change lock. */
5230 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5234 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5235 ice_release_change_lock(hw);
5239 /* Every recipe that just got created add it to the recipe
/* Mirror the programmed recipes into SW bookkeeping (sw->recp_list). */
5242 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5243 struct ice_switch_info *sw = hw->switch_info;
5244 bool is_root, idx_found = false;
5245 struct ice_sw_recipe *recp;
5246 u16 idx, buf_idx = 0;
5248 /* find buffer index for copying some data */
5249 for (idx = 0; idx < rm->n_grp_count; idx++)
5250 if (buf[idx].recipe_indx == entry->rid) {
5256 status = ICE_ERR_OUT_OF_RANGE;
5260 recp = &sw->recp_list[entry->rid];
5261 is_root = (rm->root_rid == entry->rid);
5262 recp->is_root = is_root;
5264 recp->root_rid = entry->rid;
5265 recp->big_recp = (is_root && rm->n_grp_count > 1);
5267 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5268 entry->r_group.n_val_pairs *
5269 sizeof(struct ice_fv_word),
5270 ICE_NONDMA_TO_NONDMA);
5272 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5273 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5275 /* Copy non-result fv index values and masks to recipe. This
5276 * call will also update the result recipe bitmask.
5278 ice_collect_result_idx(&buf[buf_idx], recp);
5280 /* for non-root recipes, also copy to the root, this allows
5281 * easier matching of a complete chained recipe
5284 ice_collect_result_idx(&buf[buf_idx],
5285 &sw->recp_list[rm->root_rid]);
5287 recp->n_ext_words = entry->r_group.n_val_pairs;
5288 recp->chain_idx = entry->chain_idx;
5289 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5290 recp->tun_type = rm->tun_type;
5291 recp->recp_created = true;
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the recp_count declaration, status check and return are elided.
 */
5306 * ice_create_recipe_group - creates recipe group
5307 * @hw: pointer to hardware structure
5308 * @rm: recipe management list entry
5309 * @lkup_exts: lookup elements
/* Wraps ice_create_first_fit_recp_def() and copies the word/mask summary
 * from lkup_exts into the recipe-management entry.
 */
5311 static enum ice_status
5312 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5313 struct ice_prot_lkup_ext *lkup_exts)
5315 enum ice_status status;
5318 rm->n_grp_count = 0;
5320 /* Create recipes for words that are marked not done by packing them
5323 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5324 &rm->rg_list, &recp_count);
5326 rm->n_grp_count += recp_count;
5327 rm->n_ext_words = lkup_exts->n_val_words;
5328 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5329 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5330 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5331 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the lkups_cnt==0 early-out and goto label are elided.
 */
5338 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5339 * @hw: pointer to hardware structure
5340 * @lkups: lookup elements or match criteria for the advanced recipe, one
5341 * structure per protocol header
5342 * @lkups_cnt: number of protocols
5343 * @bm: bitmap of field vectors to consider
5344 * @fv_list: pointer to a list that holds the returned field vectors
5346 static enum ice_status
5347 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5348 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5350 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element; freed below. */
5354 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5356 return ICE_ERR_NO_MEMORY;
5358 for (i = 0; i < lkups_cnt; i++)
5359 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5360 status = ICE_ERR_CFG;
5364 /* Find field vectors that include all specified protocol types */
5365 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* Single cleanup point for prot_ids on both success and error paths. */
5368 ice_free(hw, prot_ids);
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the success return and closing braces are elided.
 */
5373 * ice_add_special_words - Add words that are not protocols, such as metadata
5374 * @rinfo: other information regarding the rule e.g. priority and action info
5375 * @lkup_exts: lookup word structure
5377 static enum ice_status
5378 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5379 struct ice_prot_lkup_ext *lkup_exts)
5381 /* If this is a tunneled packet, then add recipe index to match the
5382 * tunnel bit in the packet metadata flags.
/* Appends one metadata-protocol word (tunnel flag) to lkup_exts; fails with
 * ICE_ERR_MAX_LIMIT when all ICE_MAX_CHAIN_WORDS slots are taken.
 */
5384 if (rinfo->tun_type != ICE_NON_TUN) {
5385 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5386 u8 word = lkup_exts->n_val_words++;
5388 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5389 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5391 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5393 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the switch's case labels for ICE_NON_TUN/default and the per-case
 * break statements are in elided lines.
 */
5400 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5401 * @hw: pointer to hardware structure
5402 * @rinfo: other information regarding the rule e.g. priority and action info
5403 * @bm: pointer to memory for returning the bitmap of field vectors
/* Maps the rule's tunnel type to a profile class, then fills @bm with the
 * field vectors (profiles) belonging to that class.
 */
5406 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5409 enum ice_prof_type type;
5411 switch (rinfo->tun_type) {
5413 type = ICE_PROF_NON_TUN;
5415 case ICE_ALL_TUNNELS:
5416 type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnel variants share the UDP-tunnel profile class. */
5418 case ICE_SW_TUN_VXLAN_GPE:
5419 case ICE_SW_TUN_GENEVE:
5420 case ICE_SW_TUN_VXLAN:
5421 case ICE_SW_TUN_UDP:
5422 case ICE_SW_TUN_GTP:
5423 type = ICE_PROF_TUN_UDP;
5425 case ICE_SW_TUN_NVGRE:
5426 type = ICE_PROF_TUN_GRE;
5428 case ICE_SW_TUN_PPPOE:
5429 type = ICE_PROF_TUN_PPPOE;
5431 case ICE_SW_TUN_AND_NON_TUN:
5433 type = ICE_PROF_ALL;
5437 ice_get_sw_fv_bitmap(hw, type, bm);
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; error gotos, cleanup labels and several condition lines are in
 * elided lines.
 */
5441 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5442 * @hw: pointer to hardware structure
5443 * @lkups: lookup elements or match criteria for the advanced recipe, one
5444 * structure per protocol header
5445 * @lkups_cnt: number of protocols
5446 * @rinfo: other information regarding the rule e.g. priority and action info
5447 * @rid: return the recipe ID of the recipe created
5449 static enum ice_status
5450 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5451 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5453 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5454 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5455 struct ice_prot_lkup_ext *lkup_exts;
5456 struct ice_recp_grp_entry *r_entry;
5457 struct ice_sw_fv_list_entry *fvit;
5458 struct ice_recp_grp_entry *r_tmp;
5459 struct ice_sw_fv_list_entry *tmp;
5460 enum ice_status status = ICE_SUCCESS;
5461 struct ice_sw_recipe *rm;
5462 bool match_tun = false;
5466 return ICE_ERR_PARAM;
5468 lkup_exts = (struct ice_prot_lkup_ext *)
5469 ice_malloc(hw, sizeof(*lkup_exts))
5471 return ICE_ERR_NO_MEMORY;
5473 /* Determine the number of words to be matched and if it exceeds a
5474 * recipe's restrictions
5476 for (i = 0; i < lkups_cnt; i++) {
5479 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5480 status = ICE_ERR_CFG;
5481 goto err_free_lkup_exts;
5484 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5486 status = ICE_ERR_CFG;
5487 goto err_free_lkup_exts;
5491 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5493 status = ICE_ERR_NO_MEMORY;
5494 goto err_free_lkup_exts;
5497 /* Get field vectors that contain fields extracted from all the protocol
5498 * headers being programmed.
5500 INIT_LIST_HEAD(&rm->fv_list);
5501 INIT_LIST_HEAD(&rm->rg_list);
5503 /* Get bitmap of field vectors (profiles) that are compatible with the
5504 * rule request; only these will be searched in the subsequent call to
5507 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5509 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5513 /* Group match words into recipes using preferred recipe grouping
5516 status = ice_create_recipe_group(hw, rm, lkup_exts);
5520 /* There is only profile for UDP tunnels. So, it is necessary to use a
5521 * metadata ID flag to differentiate different tunnel types. A separate
5522 * recipe needs to be used for the metadata.
/* Presumably sets match_tun = true in the elided body — TODO confirm. */
5524 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5525 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5526 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5529 /* set the recipe priority if specified */
5530 rm->priority = rinfo->priority ? rinfo->priority : 0;
5532 /* Find offsets from the field vector. Pick the first one for all the
5535 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5539 /* get bitmap of all profiles the recipe will be associated with */
5540 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5541 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5543 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5544 ice_set_bit((u16)fvit->profile_id, profiles);
5547 /* Create any special protocol/offset pairs, such as looking at tunnel
5548 * bits by extracting metadata
5550 status = ice_add_special_words(rinfo, lkup_exts);
5552 goto err_free_lkup_exts;
5554 /* Look for a recipe which matches our requested fv / mask list */
5555 *rid = ice_find_recp(hw, lkup_exts);
5556 if (*rid < ICE_MAX_NUM_RECIPES)
5557 /* Success if found a recipe that match the existing criteria */
5560 /* Recipe we need does not exist, add a recipe */
5561 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5565 /* Associate all the recipes created with all the profiles in the
5566 * common field vector.
5568 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5570 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write of the FW's recipe-to-profile association, done under
 * the change lock acquired below.
 */
5572 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5573 (u8 *)r_bitmap, NULL);
5577 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5578 ICE_MAX_NUM_RECIPES);
5579 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5583 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5586 ice_release_change_lock(hw);
/* Success: report the root recipe and cache the lookup words in SW
 * bookkeeping so ice_find_recp() can match this recipe later.
 */
5592 *rid = rm->root_rid;
5593 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5594 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: drain the recipe-group and field-vector lists, then free the
 * temporary allocations.
 */
5596 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5597 ice_recp_grp_entry, l_entry) {
5598 LIST_DEL(&r_entry->l_entry);
5599 ice_free(hw, r_entry);
5602 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5604 LIST_DEL(&fvit->list_entry);
5609 ice_free(hw, rm->root_buf);
5614 ice_free(hw, lkup_exts);
/* NOTE(review): elided extract — leading numerals are original file line
 * numbers; the early "return" statements after each assignment group and
 * the tcp/udp/ipv6 flag assignments are in elided lines.
 */
5620 * ice_find_dummy_packet - find dummy packet by tunnel type
5622 * @lkups: lookup elements or match criteria for the advanced recipe, one
5623 * structure per protocol header
5624 * @lkups_cnt: number of protocols
5625 * @tun_type: tunnel type from the match criteria
5626 * @pkt: dummy packet to fill according to filter match criteria
5627 * @pkt_len: packet length of dummy packet
5628 * @offsets: pointer to receive the pointer to the offsets for the packet
5631 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5632 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5634 const struct ice_dummy_pkt_offsets **offsets)
5636 bool tcp = false, udp = false, ipv6 = false;
/* Tunnel types with a single fixed template (GTP, PPPoE) are handled first,
 * before the L3/L4 flags are even scanned.
 */
5639 if (tun_type == ICE_SW_TUN_GTP) {
5640 *pkt = dummy_udp_gtp_packet;
5641 *pkt_len = sizeof(dummy_udp_gtp_packet);
5642 *offsets = dummy_udp_gtp_packet_offsets;
5645 if (tun_type == ICE_SW_TUN_PPPOE) {
5646 *pkt = dummy_pppoe_packet;
5647 *pkt_len = sizeof(dummy_pppoe_packet);
5648 *offsets = dummy_pppoe_packet_offsets;
/* Scan the match criteria to learn which inner headers the rule targets. */
5651 for (i = 0; i < lkups_cnt; i++) {
5652 if (lkups[i].type == ICE_UDP_ILOS)
5654 else if (lkups[i].type == ICE_TCP_IL)
5656 else if (lkups[i].type == ICE_IPV6_OFOS)
5660 if (tun_type == ICE_ALL_TUNNELS) {
5661 *pkt = dummy_gre_udp_packet;
5662 *pkt_len = sizeof(dummy_gre_udp_packet);
5663 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: pick TCP vs UDP inner template (condition line elided —
 * presumably keyed on the 'tcp' flag; TODO confirm).
 */
5667 if (tun_type == ICE_SW_TUN_NVGRE) {
5669 *pkt = dummy_gre_tcp_packet;
5670 *pkt_len = sizeof(dummy_gre_tcp_packet);
5671 *offsets = dummy_gre_tcp_packet_offsets;
5675 *pkt = dummy_gre_udp_packet;
5676 *pkt_len = sizeof(dummy_gre_udp_packet);
5677 *offsets = dummy_gre_udp_packet_offsets;
5681 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5682 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5684 *pkt = dummy_udp_tun_tcp_packet;
5685 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5686 *offsets = dummy_udp_tun_tcp_packet_offsets;
5690 *pkt = dummy_udp_tun_udp_packet;
5691 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5692 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled fallbacks, chosen from the udp/tcp/ipv6 flags; the default
 * (last) template is plain IPv4 TCP.
 */
5697 *pkt = dummy_udp_packet;
5698 *pkt_len = sizeof(dummy_udp_packet);
5699 *offsets = dummy_udp_packet_offsets;
5701 } else if (udp && ipv6) {
5702 *pkt = dummy_udp_ipv6_packet;
5703 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5704 *offsets = dummy_udp_ipv6_packet_offsets;
5706 } else if ((tcp && ipv6) || ipv6) {
5707 *pkt = dummy_tcp_ipv6_packet;
5708 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5709 *offsets = dummy_tcp_ipv6_packet_offsets;
5713 *pkt = dummy_tcp_packet;
5714 *pkt_len = sizeof(dummy_tcp_packet);
5715 *offsets = dummy_tcp_packet_offsets;
5719 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5721 * @lkups: lookup elements or match criteria for the advanced recipe, one
5722 * structure per protocol header
5723 * @lkups_cnt: number of protocols
5724 * @s_rule: stores rule information from the match criteria
5725 * @dummy_pkt: dummy packet to fill according to filter match criteria
5726 * @pkt_len: packet length of dummy packet
5727 * @offsets: offset info for the dummy packet
 *
 * Copies the selected dummy template into the switch rule's header buffer,
 * then overlays the caller's header values — masked word by word — at the
 * offset recorded for each protocol layer.  Returns ICE_ERR_PARAM when a
 * lookup type has no entry in @offsets or is not a supported header type.
5729 static enum ice_status
5730 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5731 struct ice_aqc_sw_rules_elem *s_rule,
5732 const u8 *dummy_pkt, u16 pkt_len,
5733 const struct ice_dummy_pkt_offsets *offsets)
5738 /* Start with a packet with a pre-defined/dummy content. Then, fill
5739 * in the header values to be looked up or matched.
5741 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5743 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5745 for (i = 0; i < lkups_cnt; i++) {
5746 enum ice_protocol_type type;
5747 u16 offset = 0, len = 0, j;
5750 /* find the start of this layer; it should be found since this
5751 * was already checked when search for the dummy packet
5753 type = lkups[i].type;
5754 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5755 if (type == offsets[j].type) {
5756 offset = offsets[j].offset;
5761 /* this should never happen in a correct calling sequence */
5763 return ICE_ERR_PARAM;
/* map the lookup type to the byte length of its header struct
 * (NOTE(review): the case labels are elided in this extract —
 * gaps at 5766-5767, 5769-5770, etc.; see the full source)
 */
5765 switch (lkups[i].type) {
5768 len = sizeof(struct ice_ether_hdr);
5771 len = sizeof(struct ice_ethtype_hdr);
5775 len = sizeof(struct ice_ipv4_hdr);
5779 len = sizeof(struct ice_ipv6_hdr);
5784 len = sizeof(struct ice_l4_hdr);
5787 len = sizeof(struct ice_sctp_hdr);
5790 len = sizeof(struct ice_nvgre);
5795 len = sizeof(struct ice_udp_tnl_hdr);
5799 len = sizeof(struct ice_udp_gtp_hdr);
5802 return ICE_ERR_PARAM;
5805 /* the length should be a word multiple */
5806 if (len % ICE_BYTES_PER_WORD)
5809 /* We have the offset to the header start, the length, the
5810 * caller's header values and mask. Use this information to
5811 * copy the data into the dummy packet appropriately based on
5812 * the mask. Note that we need to only write the bits as
5813 * indicated by the mask to make sure we don't improperly write
5814 * over any significant packet data.
 * Per 16-bit word: keep template bits where the mask is clear,
 * take the caller's header bits where the mask is set.
5816 for (j = 0; j < len / sizeof(u16); j++)
5817 if (((u16 *)&lkups[i].m_u)[j])
5818 ((u16 *)(pkt + offset))[j] =
5819 (((u16 *)(pkt + offset))[j] &
5820 ~((u16 *)&lkups[i].m_u)[j]) |
5821 (((u16 *)&lkups[i].h_u)[j] &
5822 ((u16 *)&lkups[i].m_u)[j]);
5825 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5831 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5832 * @hw: pointer to the hardware structure
5833 * @tun_type: tunnel type
5834 * @pkt: dummy packet to fill in
5835 * @offsets: offset info for the dummy packet
 *
 * For UDP-based tunnels, looks up the currently-open tunnel port
 * (VXLAN or GENEVE) and patches it into the outer UDP header of the
 * already-filled dummy packet.
5837 static enum ice_status
5838 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5839 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5844 case ICE_SW_TUN_AND_NON_TUN:
5845 case ICE_SW_TUN_VXLAN_GPE:
5846 case ICE_SW_TUN_VXLAN:
5847 case ICE_SW_TUN_UDP:
5848 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5852 case ICE_SW_TUN_GENEVE:
5853 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5858 /* Nothing needs to be done for this tunnel type */
5862 /* Find the outer UDP protocol header and insert the port number */
5863 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5864 if (offsets[i].type == ICE_UDP_OF) {
5865 struct ice_l4_hdr *hdr;
5868 offset = offsets[i].offset;
5869 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* 16-bit byte swap: store the port in network (big-endian) byte order */
5870 hdr->dst_port = open_port << 8 | open_port >> 8;
5880 * ice_find_adv_rule_entry - Search a rule entry
5881 * @hw: pointer to the hardware structure
5882 * @lkups: lookup elements or match criteria for the advanced recipe, one
5883 * structure per protocol header
5884 * @lkups_cnt: number of protocols
5885 * @recp_id: recipe ID for which we are finding the rule
5886 * @rinfo: other information regarding the rule e.g. priority and action info
5888 * Helper function to search for a given advance rule entry
5889 * Returns pointer to entry storing the rule if found
 *
 * A rule matches when the lookup count is equal, every lookup element
 * compares equal byte-for-byte, and the switch-action flag and tunnel
 * type also match (the remainder of the final condition is elided in
 * this extract).
5891 static struct ice_adv_fltr_mgmt_list_entry *
5892 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5893 u16 lkups_cnt, u8 recp_id,
5894 struct ice_adv_rule_info *rinfo)
5896 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5897 struct ice_switch_info *sw = hw->switch_info;
/* Walk the bookkeeping list of filters attached to this recipe */
5900 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5901 ice_adv_fltr_mgmt_list_entry, list_entry) {
5902 bool lkups_matched = true;
/* a differing lookup count can never match */
5904 if (lkups_cnt != list_itr->lkups_cnt)
5906 for (i = 0; i < list_itr->lkups_cnt; i++)
5907 if (memcmp(&list_itr->lkups[i], &lkups[i],
5909 lkups_matched = false;
5912 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5913 rinfo->tun_type == list_itr->rule_info.tun_type &&
5921 * ice_adv_add_update_vsi_list
5922 * @hw: pointer to the hardware structure
5923 * @m_entry: pointer to current adv filter management list entry
5924 * @cur_fltr: filter information from the book keeping entry
5925 * @new_fltr: filter information with the new VSI to be added
5927 * Call AQ command to add or update previously created VSI list with new VSI.
5929 * Helper function to do book keeping associated with adding filter information
5930 * The algorithm to do the booking keeping is described below :
5931 * When a VSI needs to subscribe to a given advanced filter
5932 * if only one VSI has been added till now
5933 * Allocate a new VSI list and add two VSIs
5934 * to this list using switch rule command
5935 * Update the previously created switch rule with the
5936 * newly created VSI list ID
5937 * if a VSI list was previously created
5938 * Add the new VSI to the previously created VSI list set
5939 * using the update switch rule command
5941 static enum ice_status
5942 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5943 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5944 struct ice_adv_rule_info *cur_fltr,
5945 struct ice_adv_rule_info *new_fltr)
5947 enum ice_status status;
5948 u16 vsi_list_id = 0;
/* Queue/queue-group forwards cannot be converted to a VSI list */
5950 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5951 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5952 return ICE_ERR_NOT_IMPL;
/* a second DROP for the same match adds nothing */
5954 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5955 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5956 return ICE_ERR_ALREADY_EXISTS;
5958 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5959 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5960 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5961 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5962 return ICE_ERR_NOT_IMPL;
5964 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5965 /* Only one entry existed in the mapping and it was not already
5966 * a part of a VSI list. So, create a VSI list with the old and
 * new VSIs (continuation elided in this extract).
5969 struct ice_fltr_info tmp_fltr;
5970 u16 vsi_handle_arr[2];
5972 /* A rule already exists with the new VSI being added */
5973 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5974 new_fltr->sw_act.fwd_id.hw_vsi_id)
5975 return ICE_ERR_ALREADY_EXISTS;
5977 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5978 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5979 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5985 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5986 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5987 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5988 /* Update the previous switch rule of "forward to VSI" to
 * "forward to VSI list" (text elided in this extract).
5991 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* record the new list in both the rule info and the bookkeeping entry */
5995 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5996 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5997 m_entry->vsi_list_info =
5998 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6001 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6003 if (!m_entry->vsi_list_info)
6006 /* A rule already exists with the new VSI being added */
6007 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6010 /* Update the previously created VSI list set with
6011 * the new VSI ID passed in
6013 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6015 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6017 ice_aqc_opc_update_sw_rules,
6019 /* update VSI list mapping info with new VSI ID */
6021 ice_set_bit(vsi_handle,
6022 m_entry->vsi_list_info->vsi_map)
6025 m_entry->vsi_count++;
6030 * ice_add_adv_rule - helper function to create an advanced switch rule
6031 * @hw: pointer to the hardware structure
6032 * @lkups: information on the words that needs to be looked up. All words
6033 * together makes one recipe
6034 * @lkups_cnt: num of entries in the lkups array
6035 * @rinfo: other information related to the rule that needs to be programmed
6036 * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
6037 * ignored in case of error.
6039 * This function can program only 1 rule at a time. The lkups is used to
6040 * describe all the words that form the "lookup" portion of the recipe.
6041 * These words can span multiple protocols. Callers to this function need to
6042 * pass in a list of protocol headers with lookup information along and mask
6043 * that determines which words are valid from the given protocol header.
6044 * rinfo describes other information related to this rule such as forwarding
6045 * IDs, priority of this rule, etc.
 *
 * Flow: validate inputs -> locate a dummy packet -> create/find the recipe
 * -> if the same rule already exists, just extend its VSI list -> otherwise
 * build the AQ switch-rule buffer (action, source, dummy packet), program
 * it via ice_aq_sw_rules(), and record it in the bookkeeping list.
6048 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6049 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6050 struct ice_rule_query_data *added_entry)
6052 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6053 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6054 const struct ice_dummy_pkt_offsets *pkt_offsets;
6055 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6056 struct LIST_HEAD_TYPE *rule_head;
6057 struct ice_switch_info *sw;
6058 enum ice_status status;
6059 const u8 *pkt = NULL;
6065 return ICE_ERR_PARAM;
6067 /* get # of words we need to match */
6069 for (i = 0; i < lkups_cnt; i++) {
6072 ptr = (u16 *)&lkups[i].m_u;
6073 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* at least one word is required; HW caps the chain length */
6077 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6078 return ICE_ERR_PARAM;
6080 /* make sure that we can locate a dummy packet */
6081 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6084 status = ICE_ERR_PARAM;
6085 goto err_ice_add_adv_rule;
/* only these four forwarding actions are supported for adv rules */
6088 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6089 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6090 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6091 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6094 vsi_handle = rinfo->sw_act.vsi_handle;
6095 if (!ice_is_vsi_valid(hw, vsi_handle))
6096 return ICE_ERR_PARAM;
6098 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6099 rinfo->sw_act.fwd_id.hw_vsi_id =
6100 ice_get_hw_vsi_num(hw, vsi_handle);
6101 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6102 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6104 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6107 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6109 /* we have to add VSI to VSI_LIST and increment vsi_count.
6110 * Also Update VSI list so that we can change forwarding rule
6111 * if the rule already exists, we will check if it exists with
6112 * same vsi_id, if not then add it to the VSI list if it already
6113 * exists if not then create a VSI list and add the existing VSI
6114 * ID and the new VSI ID to the list
6115 * We will add that VSI to the list
6117 status = ice_adv_add_update_vsi_list(hw, m_entry,
6118 &m_entry->rule_info,
6121 added_entry->rid = rid;
6122 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6123 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* new rule: size the AQ buffer for the header-less rule plus packet */
6127 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6128 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6130 return ICE_ERR_NO_MEMORY;
6131 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6132 switch (rinfo->sw_act.fltr_act) {
6133 case ICE_FWD_TO_VSI:
6134 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6135 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6136 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6139 act |= ICE_SINGLE_ACT_TO_Q;
6140 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6141 ICE_SINGLE_ACT_Q_INDEX_M;
6143 case ICE_FWD_TO_QGRP:
/* queue-region size is encoded as log2 of the group size */
6144 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6145 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6146 act |= ICE_SINGLE_ACT_TO_Q;
6147 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6148 ICE_SINGLE_ACT_Q_INDEX_M;
6149 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6150 ICE_SINGLE_ACT_Q_REGION_M;
6152 case ICE_DROP_PACKET:
6153 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6154 ICE_SINGLE_ACT_VALID_BIT;
6157 status = ICE_ERR_CFG;
6158 goto err_ice_add_adv_rule;
6161 /* set the rule LOOKUP type based on caller specified 'RX'
6162 * instead of hardcoding it to be either LOOKUP_TX/RX
6164 * for 'RX' set the source to be the port number
6165 * for 'TX' set the source to be the source HW VSI number (determined
 * above from the VSI handle — elided in this extract)
6169 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6170 s_rule->pdata.lkup_tx_rx.src =
6171 CPU_TO_LE16(hw->port_info->lport);
6173 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6174 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6177 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6178 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6180 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
/* UDP tunnels also need the open tunnel port patched into the packet */
6183 if (rinfo->tun_type != ICE_NON_TUN) {
6184 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6185 s_rule->pdata.lkup_tx_rx.hdr,
6188 goto err_ice_add_adv_rule;
6191 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6192 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6195 goto err_ice_add_adv_rule;
6196 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6197 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry))
6199 status = ICE_ERR_NO_MEMORY;
6200 goto err_ice_add_adv_rule;
/* keep a private copy of the lookups for later find/remove/replay */
6203 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6204 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6205 ICE_NONDMA_TO_NONDMA);
6206 if (!adv_fltr->lkups) {
6207 status = ICE_ERR_NO_MEMORY;
6208 goto err_ice_add_adv_rule;
6211 adv_fltr->lkups_cnt = lkups_cnt;
6212 adv_fltr->rule_info = *rinfo;
6213 adv_fltr->rule_info.fltr_rule_id =
6214 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6215 sw = hw->switch_info;
6216 sw->recp_list[rid].adv_rule = true;
6217 rule_head = &sw->recp_list[rid].filt_rules;
6219 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6220 struct ice_fltr_info tmp_fltr;
6222 tmp_fltr.fltr_rule_id =
6223 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6224 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6225 tmp_fltr.fwd_id.hw_vsi_id =
6226 ice_get_hw_vsi_num(hw, vsi_handle);
6227 tmp_fltr.vsi_handle = vsi_handle;
6228 /* Update the previous switch rule of "forward to VSI" to
 * "forward to VSI list" (text elided in this extract).
6231 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6233 goto err_ice_add_adv_rule;
6234 adv_fltr->vsi_count = 1;
6237 /* Add rule entry to book keeping list */
6238 LIST_ADD(&adv_fltr->list_entry, rule_head);
6240 added_entry->rid = rid;
6241 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6242 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6244 err_ice_add_adv_rule:
/* on failure, free the partially-built bookkeeping entry; the AQ
 * buffer is freed on every path
 */
6245 if (status && adv_fltr) {
6246 ice_free(hw, adv_fltr->lkups);
6247 ice_free(hw, adv_fltr);
6250 ice_free(hw, s_rule);
6256 * ice_adv_rem_update_vsi_list
6257 * @hw: pointer to the hardware structure
6258 * @vsi_handle: VSI handle of the VSI to remove
6259 * @fm_list: filter management entry for which the VSI list management needs to
 * be done (continuation elided in this extract)
 *
 * Removes @vsi_handle from the rule's VSI list.  When only one VSI remains
 * afterwards, the rule is converted back to a plain "forward to VSI" and
 * the now-unneeded VSI list is deleted.
6262 static enum ice_status
6263 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6264 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6266 struct ice_vsi_list_map_info *vsi_list_info;
6267 enum ice_sw_lkup_type lkup_type;
6268 enum ice_status status;
/* only rules that actually forward to a VSI list can be trimmed */
6271 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6272 fm_list->vsi_count == 0)
6273 return ICE_ERR_PARAM;
6275 /* A rule with the VSI being removed does not exist */
6276 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6277 return ICE_ERR_DOES_NOT_EXIST;
6279 lkup_type = ICE_SW_LKUP_LAST;
6280 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6281 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6282 ice_aqc_opc_update_sw_rules,
6287 fm_list->vsi_count--;
6288 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6289 vsi_list_info = fm_list->vsi_list_info;
6290 if (fm_list->vsi_count == 1) {
6291 struct ice_fltr_info tmp_fltr;
/* find the single remaining subscriber VSI */
6294 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6296 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6297 return ICE_ERR_OUT_OF_RANGE;
6299 /* Make sure VSI list is empty before removing it below */
6300 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6302 ice_aqc_opc_update_sw_rules,
6306 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6307 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6308 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6309 tmp_fltr.fwd_id.hw_vsi_id =
6310 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6311 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6312 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6314 /* Update the previous switch rule of "MAC forward to VSI" to
6315 * "MAC fwd to VSI list"
6317 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6319 ice_debug(hw, ICE_DBG_SW,
6320 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6321 tmp_fltr.fwd_id.hw_vsi_id, status);
6325 /* Remove the VSI list since it is no longer used */
6326 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6328 ice_debug(hw, ICE_DBG_SW,
6329 "Failed to remove VSI list %d, error %d\n",
6330 vsi_list_id, status);
/* detach and free the list mapping; NULL guards later use */
6334 LIST_DEL(&vsi_list_info->list_entry);
6335 ice_free(hw, vsi_list_info);
6336 fm_list->vsi_list_info = NULL;
6343 * ice_rem_adv_rule - removes existing advanced switch rule
6344 * @hw: pointer to the hardware structure
6345 * @lkups: information on the words that needs to be looked up. All words
6346 * together makes one recipe
6347 * @lkups_cnt: num of entries in the lkups array
6348 * @rinfo: pointer to the rule information for the rule to remove
6350 * This function can be used to remove 1 rule at a time. The lkups is
6351 * used to describe all the words that forms the "lookup" portion of the
6352 * rule. These words can span multiple protocols. Callers to this function
6353 * need to pass in a list of protocol headers with lookup information along
6354 * and mask that determines which words are valid from the given protocol
6355 * header. rinfo describes other information related to this rule such as
6356 * forwarding IDs, priority of this rule, etc.
 *
 * Flow: rebuild the lookup extraction -> find the matching recipe and
 * rule entry -> drop the VSI from the rule's VSI list if shared ->
 * when no subscriber remains, remove the HW rule via an AQ command and
 * free the bookkeeping entry under the rule lock.
6359 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6360 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6362 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6363 struct ice_prot_lkup_ext lkup_exts;
6364 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6365 enum ice_status status = ICE_SUCCESS;
6366 bool remove_rule = false;
6367 u16 i, rid, vsi_handle;
6369 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6370 for (i = 0; i < lkups_cnt; i++) {
6373 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6376 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6381 /* Create any special protocol/offset pairs, such as looking at tunnel
6382 * bits by extracting metadata
6384 status = ice_add_special_words(rinfo, &lkup_exts);
6388 rid = ice_find_recp(hw, &lkup_exts);
6389 /* If did not find a recipe that match the existing criteria */
6390 if (rid == ICE_MAX_NUM_RECIPES)
6391 return ICE_ERR_PARAM;
6393 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6394 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6395 /* the rule is already removed */
6398 ice_acquire_lock(rule_lock);
6399 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6401 } else if (list_elem->vsi_count > 1) {
/* rule is shared; just drop this VSI from the list */
6402 list_elem->vsi_list_info->ref_cnt--;
6403 remove_rule = false;
6404 vsi_handle = rinfo->sw_act.vsi_handle;
6405 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6407 vsi_handle = rinfo->sw_act.vsi_handle;
6408 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6410 ice_release_lock(rule_lock);
6413 if (list_elem->vsi_count == 0)
6416 ice_release_lock(rule_lock);
6418 struct ice_aqc_sw_rules_elem *s_rule;
/* header-less buffer is enough for a remove-by-index command */
6421 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6423 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6426 return ICE_ERR_NO_MEMORY;
6427 s_rule->pdata.lkup_tx_rx.act = 0;
6428 s_rule->pdata.lkup_tx_rx.index =
6429 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6430 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6431 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6433 ice_aqc_opc_remove_sw_rules, NULL);
6434 if (status == ICE_SUCCESS) {
/* HW rule gone; unlink and free the bookkeeping entry */
6435 ice_acquire_lock(rule_lock);
6436 LIST_DEL(&list_elem->list_entry);
6437 ice_free(hw, list_elem->lkups);
6438 ice_free(hw, list_elem);
6439 ice_release_lock(rule_lock);
6441 ice_free(hw, s_rule);
6447 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6448 * @hw: pointer to the hardware structure
6449 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6451 * This function is used to remove 1 rule at a time. The removal is based on
6452 * the remove_entry parameter. This function will remove rule for a given
6453 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 *
 * Returns ICE_ERR_PARAM when the recipe was never created or no rule with
 * the given ID is found in that recipe's filter list.
6456 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6457 struct ice_rule_query_data *remove_entry)
6459 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6460 struct LIST_HEAD_TYPE *list_head;
6461 struct ice_adv_rule_info rinfo;
6462 struct ice_switch_info *sw;
6464 sw = hw->switch_info;
6465 if (!sw->recp_list[remove_entry->rid].recp_created)
6466 return ICE_ERR_PARAM;
6467 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6468 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6470 if (list_itr->rule_info.fltr_rule_id ==
6471 remove_entry->rule_id) {
/* copy rule info so the caller-specified VSI can be substituted */
6472 rinfo = list_itr->rule_info;
6473 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6474 return ice_rem_adv_rule(hw, list_itr->lkups,
6475 list_itr->lkups_cnt, &rinfo);
6478 return ICE_ERR_PARAM;
6482 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 * given VSI
6484 * @hw: pointer to the hardware structure
6485 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6487 * This function is used to remove all the rules for a given VSI and as soon
6488 * as removing a rule fails, it will return immediately with the error code,
6489 * else it will return ICE_SUCCESS
6492 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6494 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6495 struct ice_vsi_list_map_info *map_info;
6496 struct LIST_HEAD_TYPE *list_head;
6497 struct ice_adv_rule_info rinfo;
6498 struct ice_switch_info *sw;
6499 enum ice_status status;
6500 u16 vsi_list_id = 0;
6503 sw = hw->switch_info;
/* scan every recipe that holds advanced rules */
6504 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6505 if (!sw->recp_list[rid].recp_created)
6507 if (!sw->recp_list[rid].adv_rule)
6509 list_head = &sw->recp_list[rid].filt_rules;
6511 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6512 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* only remove rules whose VSI list contains this VSI */
6513 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6517 rinfo = list_itr->rule_info;
6518 rinfo.sw_act.vsi_handle = vsi_handle;
6519 status = ice_rem_adv_rule(hw, list_itr->lkups,
6520 list_itr->lkups_cnt, &rinfo);
6530 * ice_replay_fltr - Replay all the filters stored by a specific list head
6531 * @hw: pointer to the hardware structure
6532 * @list_head: list for which filters needs to be replayed
6533 * @recp_id: Recipe ID for which rules need to be replayed
 *
 * Moves the stored filters onto a temporary list, re-adds each one to HW
 * (per-VSI for multi-VSI entries), then frees the temporary list.
6535 static enum ice_status
6536 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6538 struct ice_fltr_mgmt_list_entry *itr;
6539 struct LIST_HEAD_TYPE l_head;
6540 enum ice_status status = ICE_SUCCESS;
6542 if (LIST_EMPTY(list_head))
6545 /* Move entries from the given list_head to a temporary l_head so that
6546 * they can be replayed. Otherwise when trying to re-add the same
6547 * filter, the function will return already exists
6549 LIST_REPLACE_INIT(list_head, &l_head);
6551 /* Mark the given list_head empty by reinitializing it so filters
6552 * could be added again by *handler
6554 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6556 struct ice_fltr_list_entry f_entry;
6558 f_entry.fltr_info = itr->fltr_info;
/* single-VSI non-VLAN filters can be re-added directly */
6559 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6560 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6561 if (status != ICE_SUCCESS)
6566 /* Add a filter per VSI separately */
/* NOTE(review): the loop construct iterating the vsi_map is elided
 * in this extract (gap at 6567-6570); verify against the full file.
 */
6571 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6573 if (!ice_is_vsi_valid(hw, vsi_handle))
6576 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6577 f_entry.fltr_info.vsi_handle = vsi_handle;
6578 f_entry.fltr_info.fwd_id.hw_vsi_id =
6579 ice_get_hw_vsi_num(hw, vsi_handle);
6580 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6581 if (recp_id == ICE_SW_LKUP_VLAN)
6582 status = ice_add_vlan_internal(hw, &f_entry);
6584 status = ice_add_rule_internal(hw, recp_id,
6586 if (status != ICE_SUCCESS)
6591 /* Clear the filter management list */
6592 ice_rem_sw_rule_info(hw, &l_head);
6597 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6598 * @hw: pointer to the hardware structure
6600 * NOTE: This function does not clean up partially added filters on error.
6601 * It is up to caller of the function to issue a reset or fail early.
6603 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6605 struct ice_switch_info *sw = hw->switch_info;
6606 enum ice_status status = ICE_SUCCESS;
/* replay every recipe's filter list; stop at the first failure */
6609 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6610 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6612 status = ice_replay_fltr(hw, i, head);
6613 if (status != ICE_SUCCESS)
6620 * ice_replay_vsi_fltr - Replay filters for requested VSI
6621 * @hw: pointer to the hardware structure
6622 * @vsi_handle: driver VSI handle
6623 * @recp_id: Recipe ID for which rules need to be replayed
6624 * @list_head: list for which filters need to be replayed
6626 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6627 * It is required to pass valid VSI handle.
6629 static enum ice_status
6630 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6631 struct LIST_HEAD_TYPE *list_head)
6633 struct ice_fltr_mgmt_list_entry *itr;
6634 enum ice_status status = ICE_SUCCESS;
6637 if (LIST_EMPTY(list_head))
6639 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6641 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6643 struct ice_fltr_list_entry f_entry;
6645 f_entry.fltr_info = itr->fltr_info;
/* single-VSI non-VLAN filters belonging to this VSI: re-add directly */
6646 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6647 itr->fltr_info.vsi_handle == vsi_handle) {
6648 /* update the src in case it is VSI num */
6649 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6650 f_entry.fltr_info.src = hw_vsi_id;
6651 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6652 if (status != ICE_SUCCESS)
/* multi-VSI entries: only replay if this VSI is in the list's map */
6656 if (!itr->vsi_list_info ||
6657 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6659 /* Clearing it so that the logic can add it back */
6660 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6661 f_entry.fltr_info.vsi_handle = vsi_handle;
6662 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6663 /* update the src in case it is VSI num */
6664 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6665 f_entry.fltr_info.src = hw_vsi_id;
6666 if (recp_id == ICE_SW_LKUP_VLAN)
6667 status = ice_add_vlan_internal(hw, &f_entry);
6669 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6670 if (status != ICE_SUCCESS)
6678 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6679 * @hw: pointer to the hardware structure
6680 * @vsi_handle: driver VSI handle
6681 * @list_head: list for which filters need to be replayed
6683 * Replay the advanced rule for the given VSI.
 *
 * Re-programs, via ice_add_adv_rule(), every stored advanced rule whose
 * switch action targets @vsi_handle.
6685 static enum ice_status
6686 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6687 struct LIST_HEAD_TYPE *list_head)
6689 struct ice_rule_query_data added_entry = { 0 };
6690 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6691 enum ice_status status = ICE_SUCCESS;
6693 if (LIST_EMPTY(list_head))
6695 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6697 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6698 u16 lk_cnt = adv_fltr->lkups_cnt;
/* skip rules that belong to other VSIs */
6700 if (vsi_handle != rinfo->sw_act.vsi_handle)
6702 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6711 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6712 * @hw: pointer to the hardware structure
6713 * @vsi_handle: driver VSI handle
6715 * Replays filters for requested VSI via vsi_handle.
 *
 * Dispatches per recipe: plain filter lists go through
 * ice_replay_vsi_fltr(), recipes flagged adv_rule through
 * ice_replay_vsi_adv_rule().
6717 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6719 struct ice_switch_info *sw = hw->switch_info;
6720 enum ice_status status;
6723 /* Update the recipes that were created */
6724 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6725 struct LIST_HEAD_TYPE *head;
6727 head = &sw->recp_list[i].filt_replay_rules;
6728 if (!sw->recp_list[i].adv_rule)
6729 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6731 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6732 if (status != ICE_SUCCESS)
6740 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6741 * @hw: pointer to the HW struct
6743 * Deletes the filter replay rules.
6745 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6747 struct ice_switch_info *sw = hw->switch_info;
6753 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6754 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6755 struct LIST_HEAD_TYPE *l_head;
6757 l_head = &sw->recp_list[i].filt_replay_rules;
6758 if (!sw->recp_list[i].adv_rule)
6759 ice_rem_sw_rule_info(hw, l_head);
6761 ice_rem_adv_rule_info(hw, l_head);