/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"

static int ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);

const struct rte_flow_ops ice_flow_ops = {
	.validate = ice_flow_validate,
	.create = ice_flow_create,
	.destroy = ice_flow_destroy,
	.flush = ice_flow_flush,
};

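/*
 * These callbacks are reached through the generic rte_flow API once an
 * application obtains the flow ops for an ice port.  A minimal
 * application-side sketch (hypothetical values: port_id 0, Rx queue 1,
 * ip_spec/ip_mask filled in by the application):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(0, &attr, pattern, actions, &err);
 */
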
static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
		    struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}
	/* Egress, priority and group are not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Not support priority.");
		return -rte_errno;
	}
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
			   const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = ice_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}
		/* Find a void item */
		pe = ice_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}
		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

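/*
 * Example: ice_pattern_skip_void_item() above compacts the pattern
 * ETH / VOID / IPV4 / VOID / END into ETH / IPV4 / END before it is
 * matched against the supported pattern templates.
 */
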
/* Check if the pattern matches a supported item type array */
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
		const struct rte_flow_item *pattern)
{
	const struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}

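/*
 * Validate the (VOID-stripped) pattern against the supported pattern
 * templates and return the matching template's input-set bitmap
 * (sw_fields); a return of 0 means the pattern is not supported.
 */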
static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	uint16_t i = 0;
	uint64_t inset;
	struct rte_flow_item *items; /* used for pattern without VOID items */
	uint32_t item_num = 0; /* non-void item number */

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++; /* the END item */

	items = rte_zmalloc("ice_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return ICE_INSET_NONE;
	}
	ice_pattern_skip_void_item(items, pattern);

	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
		if (ice_match_pattern(ice_supported_patterns[i].items,
				      items)) {
			inset = ice_supported_patterns[i].sw_fields;
			rte_free(items);
			return inset;
		}
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   pattern, "Unsupported pattern");
	rte_free(items);
	return ICE_INSET_NONE;
}

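/*
 * Walk the pattern and collect an ICE_INSET_* bit for every header field
 * that carries an exact-match mask.  Items following a VXLAN/NVGRE item
 * are treated as inner (tunnel) headers and use the ICE_INSET_TUN_*
 * variants.  Returns 0 and sets the flow error on any unsupported mask.
 */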
static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_icmp *icmp_mask;
	const struct rte_flow_item_icmp6 *icmp6_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	enum rte_flow_item_type item_type;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	uint64_t input_set = ICE_INSET_NONE;
	bool is_tunnel = false;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return 0;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (rte_is_broadcast_ether_addr(&eth_mask->src))
					input_set |= ICE_INSET_SMAC;
				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
					input_set |= ICE_INSET_DMAC;
				if (eth_mask->type == RTE_BE16(0xffff))
					input_set |= ICE_INSET_ETHERTYPE;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!(ipv4_spec && ipv4_mask))
				break;

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return 0;
			}

			if (is_tunnel) {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_PROTO;
			} else {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!(ipv6_spec && ipv6_mask))
				break;

			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
				return 0;
			}

			if (is_tunnel) {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_TUN_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_TUN_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_TTL;
			} else {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
				    == rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TOS;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!(udp_spec && udp_mask))
				break;

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return 0;
			}

			if (is_tunnel) {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!(tcp_spec && tcp_mask))
				break;

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return 0;
			}

			if (is_tunnel) {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (!(sctp_spec && sctp_mask))
				break;

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return 0;
			}

			if (is_tunnel) {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_ICMP:
			icmp_mask = item->mask;
			if (icmp_mask->hdr.icmp_code ||
			    icmp_mask->hdr.icmp_cksum ||
			    icmp_mask->hdr.icmp_ident ||
			    icmp_mask->hdr.icmp_seq_nb) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP mask");
				return 0;
			}

			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP;
			break;

		case RTE_FLOW_ITEM_TYPE_ICMP6:
			icmp6_mask = item->mask;
			if (icmp6_mask->code ||
			    icmp6_mask->checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP6 mask");
				return 0;
			}

			if (icmp6_mask->type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP6;
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return 0;
			}
			if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
			    vxlan_mask->vni[1] == UINT8_MAX &&
			    vxlan_mask->vni[2] == UINT8_MAX)
				input_set |= ICE_INSET_TUN_ID;
			is_tunnel = true;
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return 0;
			}
			if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
			    nvgre_mask->tni[1] == UINT8_MAX &&
			    nvgre_mask->tni[2] == UINT8_MAX)
				input_set |= ICE_INSET_TUN_ID;
			is_tunnel = true;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return 0;
		}
	}

	return input_set;
}

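/*
 * The fields actually matched by the pattern must be a subset of the
 * input set supported by the matched pattern template.
 */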
static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
			uint64_t inset, struct rte_flow_error *error)
{
	uint64_t fields;

	/* get valid field */
	fields = ice_get_flow_field(pattern, error);
	if (!fields || fields & (~inset)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}
	return 0;
}

static int ice_flow_valid_action(struct rte_eth_dev *dev,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *act_q;
	uint16_t queue;
	const struct rte_flow_action *action;

	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		switch (action->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			queue = act_q->index;
			if (queue >= dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions, "Invalid queue ID for"
						   " switch filter.");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}
	return 0;
}

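/*
 * Generic rte_flow validate callback: check the attributes, pattern and
 * actions without programming any hardware.
 */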
static int
ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	uint64_t inset = 0;
	int ret = ICE_ERR_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}
	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	ret = ice_flow_valid_attr(attr, error);
	if (ret)
		return ret;
	inset = ice_flow_valid_pattern(pattern, error);
	if (!inset)
		return -rte_errno;
	ret = ice_flow_valid_inset(pattern, inset, error);
	if (ret)
		return ret;
	ret = ice_flow_valid_action(dev, actions, error);
	if (ret)
		return ret;

	return 0;
}

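/*
 * Generic rte_flow create callback: re-validate the rule, program it as a
 * switch filter, and on success track it on the PF's flow list so it can
 * later be destroyed or flushed.
 */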
static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	int ret;

	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = ice_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		goto free_flow;

	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
	if (ret)
		goto free_flow;

	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}

static int
ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	ret = ice_destroy_switch_filter(pf, flow, error);
	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

static int
ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *p_flow;
	void *temp;
	int ret = 0;

	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
		ret = ice_flow_destroy(dev, p_flow, error);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to flush SW flows.");
			return -rte_errno;
		}
	}

	return ret;
}