/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "ice_logs.h"
#include "base/ice_type.h"
#include "ice_switch_filter.h"
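
/* Translate a flow pattern (and the tunnel type derived from it) into the
 * list of ice_adv_lkup_elem entries consumed by the shared switch code.
 * On success, *lkups_num holds the number of lookup elements filled in.
 */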
static int
ice_parse_switch_filter(const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct ice_adv_lkup_elem *list,
			uint16_t *lkups_num,
			enum ice_sw_tunnel_type tun_type)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	uint16_t j, t = 0;
	uint16_t tunnel_valid = 0;
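
	/* 't' indexes the next free element in 'list'; 'tunnel_valid' is
	 * set once a tunnel item (VXLAN/NVGRE) has been parsed, so that a
	 * UDP item seen before the tunnel header is classified as the
	 * outer UDP header (ICE_UDP_OF) rather than an inner one.
	 */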
	for (item = pattern; item->type !=
			RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			if (eth_spec && eth_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_MAC_OFOS : ICE_MAC_IL;
				struct ice_ether_hdr *h;
				struct ice_ether_hdr *m;

				h = &list[t].h_u.eth_hdr;
				m = &list[t].m_u.eth_hdr;
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (eth_mask->src.addr_bytes[j] ==
								UINT8_MAX) {
						h->src_addr[j] =
						eth_spec->src.addr_bytes[j];
						m->src_addr[j] =
						eth_mask->src.addr_bytes[j];
					}
					if (eth_mask->dst.addr_bytes[j] ==
								UINT8_MAX) {
						h->dst_addr[j] =
						eth_spec->dst.addr_bytes[j];
						m->dst_addr[j] =
						eth_mask->dst.addr_bytes[j];
					}
				}
				t++;
				if (eth_mask->type == UINT16_MAX) {
					list[t].type = ICE_ETYPE_OL;
					list[t].h_u.ethertype.ethtype_id =
						eth_spec->type;
					list[t].m_u.ethertype.ethtype_id =
						UINT16_MAX;
					t++;
				}
			} else if (!eth_spec && !eth_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_MAC_OFOS : ICE_MAC_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			if (ipv4_spec && ipv4_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
					list[t].h_u.ipv4_hdr.src_addr =
						ipv4_spec->hdr.src_addr;
					list[t].m_u.ipv4_hdr.src_addr =
						ipv4_mask->hdr.src_addr;
				}
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
					list[t].h_u.ipv4_hdr.dst_addr =
						ipv4_spec->hdr.dst_addr;
					list[t].m_u.ipv4_hdr.dst_addr =
						ipv4_mask->hdr.dst_addr;
				}
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
					list[t].h_u.ipv4_hdr.time_to_live =
						ipv4_spec->hdr.time_to_live;
					list[t].m_u.ipv4_hdr.time_to_live =
						ipv4_mask->hdr.time_to_live;
				}
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
					list[t].h_u.ipv4_hdr.protocol =
						ipv4_spec->hdr.next_proto_id;
					list[t].m_u.ipv4_hdr.protocol =
						ipv4_mask->hdr.next_proto_id;
				}
				if (ipv4_mask->hdr.type_of_service ==
						UINT8_MAX) {
					list[t].h_u.ipv4_hdr.tos =
						ipv4_spec->hdr.type_of_service;
					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
				}
				t++;
			} else if (!ipv4_spec && !ipv4_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			if (ipv6_spec && ipv6_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				struct ice_ipv6_hdr *f;
				struct ice_ipv6_hdr *s;

				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] ==
								UINT8_MAX) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
					}
					if (ipv6_mask->hdr.dst_addr[j] ==
								UINT8_MAX) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
					}
				}
				if (ipv6_mask->hdr.proto == UINT8_MAX) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr = UINT8_MAX;
				}
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit = UINT8_MAX;
				}
				t++;
			} else if (!ipv6_spec && !ipv6_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;
			if (udp_spec && udp_mask) {
				if (tun_type == ICE_SW_TUN_VXLAN &&
						tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
				}
				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
				}
				t++;
			} else if (!udp_spec && !udp_mask) {
				list[t].type = ICE_UDP_ILOS;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			if (tcp_spec && tcp_mask) {
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
				}
				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
				}
				t++;
			} else if (!tcp_spec && !tcp_mask) {
				list[t].type = ICE_TCP_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
				}
				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
				}
				t++;
			} else if (!sctp_spec && !sctp_mask) {
				list[t].type = ICE_SCTP_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] == UINT8_MAX &&
					vxlan_mask->vni[1] == UINT8_MAX &&
					vxlan_mask->vni[2] == UINT8_MAX) {
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						UINT32_MAX;
				}
				t++;
			} else if (!vxlan_spec && !vxlan_mask) {
				list[t].type = ICE_VXLAN;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] == UINT8_MAX &&
					nvgre_mask->tni[1] == UINT8_MAX &&
					nvgre_mask->tni[2] == UINT8_MAX) {
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						UINT32_MAX;
				}
				t++;
			} else if (!nvgre_spec && !nvgre_mask) {
				list[t].type = ICE_NVGRE;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_END:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Invalid pattern item.");
			goto out;
		}
	}

	*lkups_num = t;

	return 0;
out:
	return -rte_errno;
}
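
/* Example (illustrative, not taken from the original sources): with
 * tun_type == ICE_NON_TUN, the pattern
 *     eth / ipv4 dst is 192.168.0.1 / udp dst is 4789 / end
 * yields *lkups_num == 2. The fully wildcarded ETH item writes
 * ICE_MAC_OFOS into list[t] without advancing 't', so its slot is
 * reused by ICE_IPV4_OFOS (dst_addr set in both h_u and m_u), which is
 * then followed by ICE_UDP_ILOS with dst_port set.
 */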

/* For now, the ice switch filter action code only
 * supports QUEUE or DROP.
 */
static int
ice_parse_switch_action(struct ice_pf *pf,
			const struct rte_flow_action *actions,
			struct rte_flow_error *error,
			struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	const struct rte_flow_action_queue *act_q;
	uint16_t base_queue;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;

	base_queue = pf->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				actions,
				"Invalid action type");
			return -rte_errno;
		}
	}

	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	rule_info->priority = 5;

	return 0;
}
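
/* Program the parsed rule into hardware via ice_add_adv_rule() and attach
 * the returned rule-tracking data to the rte_flow handle so the rule can
 * be removed later.
 */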
static int
ice_switch_rule_set(struct ice_pf *pf,
			struct ice_adv_lkup_elem *list,
			uint16_t lkups_cnt,
			struct ice_adv_rule_info *rule_info,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;
	struct ice_rule_query_data rule_added = {0};
	struct ice_rule_query_data *filter_ptr;

	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"item number too large for rule");
		return -rte_errno;
	}
	if (!list) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"lookup list should not be NULL");
		return -rte_errno;
	}

	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
	if (!ret) {
		filter_ptr = rte_zmalloc("ice_switch_filter",
			sizeof(struct ice_rule_query_data), 0);
		if (!filter_ptr) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return -EINVAL;
		}
		flow->rule = filter_ptr;
		rte_memcpy(filter_ptr,
			&rule_added,
			sizeof(struct ice_rule_query_data));
	}

	return ret;
}
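
/* Entry point for creating a switch filter: size and allocate the lookup
 * list, derive the tunnel type from the pattern, parse the pattern and
 * actions, then program the rule.
 */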
int
ice_create_switch_filter(struct ice_pf *pf,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tun_type = ICE_SW_TUN_VXLAN;
		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
			tun_type = ICE_SW_TUN_NVGRE;
		/* reserve one more memory slot for ETH, which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}
	rule_info.tun_type = tun_type;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"No memory for PMD internal items");
		return -rte_errno;
	}

	ret = ice_parse_switch_filter(pattern, actions, error,
			list, &lkups_num, tun_type);
	if (ret)
		goto error;

	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
	if (ret)
		goto error;

	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
	if (ret)
		goto error;

	rte_free(list);
	return 0;

error:
	rte_free(list);

	return -rte_errno;
}
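
/* Illustrative usage (assumed caller behavior, not taken from this file):
 * the PMD's generic flow layer is expected to dispatch a request such as
 * the testpmd command
 *     flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.1 / end
 *         actions queue index 3 / end
 * to ice_create_switch_filter(), and to call ice_destroy_switch_filter()
 * below when the flow is destroyed.
 */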
int
ice_destroy_switch_filter(struct ice_pf *pf,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;
	struct ice_rule_query_data *filter_ptr;

	filter_ptr = (struct ice_rule_query_data *)
			flow->rule;

	if (!filter_ptr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"no such flow"
			" created by switch filter");
		return -rte_errno;
	}

	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"failed to destroy switch filter rule");
		return -rte_errno;
	}

	rte_free(filter_ptr);
	return ret;
}
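
/* Free only the driver-side rule-tracking data attached to a flow; the
 * hardware rule is not touched here. Presumably intended for teardown
 * paths where the hardware state is released elsewhere.
 */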
void
ice_free_switch_filter_rule(void *rule)
{
	struct ice_rule_query_data *filter_ptr;

	filter_ptr = (struct ice_rule_query_data *)rule;

	rte_free(filter_ptr);
}