/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"
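
/* Generic rte_flow support for the ice PMD: flow rules are validated against
 * the table of supported patterns and then programmed through the switch
 * filter module (see ice_switch_filter.h).
 */
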
static int ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
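
/* rte_flow operations exposed by the ice PMD */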
const struct rte_flow_ops ice_flow_ops = {
	.validate = ice_flow_validate,
	.create = ice_flow_create,
	.destroy = ice_flow_destroy,
	.flush = ice_flow_flush,
};
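
/* Validate flow rule attributes: only ingress rules are supported;
 * egress, priority and group must be left at zero.
 */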
static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
		    struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only ingress is supported.");
		return -rte_errno;
	}

	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Egress is not supported.");
		return -rte_errno;
	}

	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Priority is not supported.");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Group is not supported.");
		return -rte_errno;
	}

	return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
			   const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = ice_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = ice_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
		const struct rte_flow_item *pattern)
{
	const struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}
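
/* Strip VOID items from the pattern, match the result against the
 * ice_supported_patterns table and return the supported input-set
 * (sw_fields) bitmap, or 0 if the pattern is not supported.
 */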
static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	uint16_t i = 0;
	uint64_t inset = 0;
	struct rte_flow_item *items; /* used for pattern without VOID items */
	uint32_t item_num = 0; /* non-void item number */

	/* Count the non-VOID items in the pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;

	items = rte_zmalloc("ice_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return 0;
	}

	ice_pattern_skip_void_item(items, pattern);

	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
		if (ice_match_pattern(ice_supported_patterns[i].items,
				      items)) {
			inset = ice_supported_patterns[i].sw_fields;
			rte_free(items);
			return inset;
		}

	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   pattern, "Unsupported pattern");
	rte_free(items);
	return 0;
}
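
/* Walk the pattern and translate the item masks into an ICE_INSET_*
 * input-set bitmap; returns 0 and sets rte_flow_error on an invalid mask.
 */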
static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_icmp *icmp_mask;
	const struct rte_flow_item_icmp6 *icmp6_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	enum rte_flow_item_type item_type;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	uint64_t input_set = ICE_INSET_NONE;
	bool is_tunnel = false;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Range is not supported");
			return 0;
		}

		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (rte_is_broadcast_ether_addr(&eth_mask->src))
					input_set |= ICE_INSET_SMAC;
				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
					input_set |= ICE_INSET_DMAC;
				if (eth_mask->type == RTE_BE16(0xffff))
					input_set |= ICE_INSET_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!(ipv4_spec && ipv4_mask))
				break;

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return 0;
			}

			if (is_tunnel) {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_PROTO;
			} else {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!(ipv6_spec && ipv6_mask))
				break;

			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
				return 0;
			}

			if (is_tunnel) {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_TUN_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_TUN_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_TTL;
			} else {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) ==
				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TOS;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!(udp_spec && udp_mask))
				break;

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return 0;
			}

			if (is_tunnel) {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!(tcp_spec && tcp_mask))
				break;

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return 0;
			}

			if (is_tunnel) {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (!(sctp_spec && sctp_mask))
				break;

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return 0;
			}

			if (is_tunnel) {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			icmp_mask = item->mask;

			if (icmp_mask->hdr.icmp_code ||
			    icmp_mask->hdr.icmp_cksum ||
			    icmp_mask->hdr.icmp_ident ||
			    icmp_mask->hdr.icmp_seq_nb) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP mask");
				return 0;
			}

			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			icmp6_mask = item->mask;

			if (icmp6_mask->code ||
			    icmp6_mask->checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP6 mask");
				return 0;
			}

			if (icmp6_mask->type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if the VXLAN item is used to describe the protocol.
			 * If so, both spec and mask should be NULL.
			 * Otherwise, both spec and mask must be non-NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return 0;
			}
			if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
			    vxlan_mask->vni[1] == UINT8_MAX &&
			    vxlan_mask->vni[2] == UINT8_MAX)
				input_set |= ICE_INSET_TUN_ID;
			is_tunnel = true;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if the NVGRE item is used to describe the protocol.
			 * If so, both spec and mask should be NULL.
			 * Otherwise, both spec and mask must be non-NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return 0;
			}
			if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
			    nvgre_mask->tni[1] == UINT8_MAX &&
			    nvgre_mask->tni[2] == UINT8_MAX)
				input_set |= ICE_INSET_TUN_ID;
			is_tunnel = true;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			break;
		}
	}

	return input_set;
}
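
/* Check that every field requested by the pattern is part of the
 * input set supported for that pattern.
 */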
static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
			uint64_t inset, struct rte_flow_error *error)
{
	uint64_t fields;

	/* Get valid field */
	fields = ice_get_flow_field(pattern, error);
	if (!fields || fields & (~inset)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	return 0;
}
static int ice_flow_valid_action(struct rte_eth_dev *dev,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *act_q;
	uint16_t queue;
	const struct rte_flow_action *action;

	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		switch (action->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			queue = act_q->index;
			if (queue >= dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue ID for switch filter.");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	return 0;
}
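
/* rte_flow validate callback: check attributes, pattern and actions in turn. */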
static int
ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	uint64_t inset = 0;
	int ret = ICE_ERR_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	ret = ice_flow_valid_attr(attr, error);
	if (ret)
		return ret;

	inset = ice_flow_valid_pattern(pattern, error);
	if (!inset)
		return -rte_errno;

	ret = ice_flow_valid_inset(pattern, inset, error);
	if (ret)
		return ret;

	ret = ice_flow_valid_action(dev, actions, error);
	if (ret)
		return ret;

	return 0;
}
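
/* rte_flow create callback: validate the rule, program it as a switch
 * filter and track it in the PF flow list.
 */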
static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	int ret;

	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	ret = ice_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		goto free_flow;

	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
	if (ret)
		goto free_flow;

	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}
static int
ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	ret = ice_destroy_switch_filter(pf, flow, error);

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *p_flow;
	void *temp;
	int ret = 0;

	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
		ret = ice_flow_destroy(dev, p_flow, error);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to flush SW flows.");
			return -rte_errno;
		}
	}

	return ret;
}