/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"
static int ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
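
/* rte_flow ops table handed back to the generic flow API by the ice PMD;
 * all callbacks below operate on switch filter rules.
 */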
const struct rte_flow_ops ice_flow_ops = {
	.validate = ice_flow_validate,
	.create = ice_flow_create,
	.destroy = ice_flow_destroy,
	.flush = ice_flow_flush,
};
static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
		    struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only ingress is supported.");
		return -rte_errno;
	}

	if (attr->egress) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Egress is not supported.");
		return -rte_errno;
	}

	if (attr->priority) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Priority is not supported.");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Group is not supported.");
		return -rte_errno;
	}

	return 0;
}
/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}
/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
			   const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = ice_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = ice_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
/* Check if the pattern matches a supported item type array */
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
		  const struct rte_flow_item *pattern)
{
	const struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}
static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	uint16_t i = 0;
	uint64_t inset;
	struct rte_flow_item *items; /* used for pattern without VOID items */
	uint32_t item_num = 0; /* non-void item number */

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	/* Reserve one more slot for the END item */
	item_num++;

	items = rte_zmalloc("ice_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return ICE_INSET_NONE;
	}

	ice_pattern_skip_void_item(items, pattern);

	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
		if (ice_match_pattern(ice_supported_patterns[i].items,
				      items)) {
			inset = ice_supported_patterns[i].sw_fields;
			rte_free(items);
			return inset;
		}
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   pattern, "Unsupported pattern");

	rte_free(items);
	return ICE_INSET_NONE;
}
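
/* Walk the pattern and turn every field carrying a fully-set mask into an
 * ICE_INSET_* bit; inner headers that follow a VXLAN or NVGRE item map to
 * the ICE_INSET_TUN_* bits instead.
 */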
static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_icmp *icmp_mask;
	const struct rte_flow_item_icmp6 *icmp6_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	enum rte_flow_item_type item_type;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	uint64_t input_set = ICE_INSET_NONE;
	bool is_tunnel = false;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Range is not supported");
			return ICE_INSET_NONE;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (rte_is_broadcast_ether_addr(&eth_mask->src))
					input_set |= ICE_INSET_SMAC;
				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
					input_set |= ICE_INSET_DMAC;
				if (eth_mask->type == RTE_BE16(0xffff))
					input_set |= ICE_INSET_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!(ipv4_spec && ipv4_mask))
				break;

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return ICE_INSET_NONE;
			}

			if (is_tunnel) {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_PROTO;
			} else {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!(ipv6_spec && ipv6_mask))
				break;

			/* Check IPv6 mask and update input set */
			if (ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.vtc_flow) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
				return ICE_INSET_NONE;
			}

			if (is_tunnel) {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_TUN_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_TUN_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_TTL;
			} else {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!(udp_spec && udp_mask))
				break;

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return ICE_INSET_NONE;
			}

			if (is_tunnel) {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!(tcp_spec && tcp_mask))
				break;

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return ICE_INSET_NONE;
			}

			if (is_tunnel) {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (!(sctp_spec && sctp_mask))
				break;

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return ICE_INSET_NONE;
			}

			if (is_tunnel) {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			icmp_mask = item->mask;
			if (icmp_mask->hdr.icmp_code ||
			    icmp_mask->hdr.icmp_cksum ||
			    icmp_mask->hdr.icmp_ident ||
			    icmp_mask->hdr.icmp_seq_nb) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP mask");
				return ICE_INSET_NONE;
			}

			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			icmp6_mask = item->mask;
			if (icmp6_mask->code ||
			    icmp6_mask->checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP6 mask");
				return ICE_INSET_NONE;
			}

			if (icmp6_mask->type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return ICE_INSET_NONE;
			}
			/* Items that follow describe inner (tunneled) headers */
			is_tunnel = true;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return ICE_INSET_NONE;
			}
			/* Items that follow describe inner (tunneled) headers */
			is_tunnel = true;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Unsupported pattern item.");
			return ICE_INSET_NONE;
		}
	}

	return input_set;
}
static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
			uint64_t inset, struct rte_flow_error *error)
{
	uint64_t fields;

	/* Get the fields actually masked in the pattern */
	fields = ice_get_flow_field(pattern, error);
	if (!fields || fields & (~inset)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	return 0;
}
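
/* Only a QUEUE action (bounded by the number of configured Rx queues) or a
 * DROP action is accepted by the switch filter.
 */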
static int ice_flow_valid_action(struct rte_eth_dev *dev,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *act_q;
	uint16_t queue;

	switch (actions->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		act_q = actions->conf;
		queue = act_q->index;
		if (queue >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions, "Invalid queue ID for"
					   " switch filter.");
			return -rte_errno;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Invalid action.");
		return -rte_errno;
	}

	return 0;
}
static int
ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	uint64_t inset = 0;
	int ret = ICE_ERR_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	ret = ice_flow_valid_attr(attr, error);
	if (ret)
		return ret;

	inset = ice_flow_valid_pattern(pattern, error);
	if (!inset)
		return -rte_errno;

	ret = ice_flow_valid_inset(pattern, inset, error);
	if (ret)
		return ret;

	ret = ice_flow_valid_action(dev, actions, error);
	if (ret)
		return ret;

	return 0;
}
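
/* Validate the rule, program it as a switch filter and, on success, track
 * the returned handle in the PF flow list.
 */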
static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	int ret;

	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = ice_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		goto free_flow;

	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
	if (ret)
		goto free_flow;

	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}
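
/* Remove the switch filter and drop the handle from the PF flow list. */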
static int
ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	ret = ice_destroy_switch_filter(pf, flow, error);

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
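
/* Destroy every flow currently tracked in the PF flow list. */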
static int
ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *p_flow;
	int ret = 0;

	/* ice_flow_destroy() unlinks and frees the current entry, so always
	 * take the list head instead of iterating with TAILQ_FOREACH.
	 */
	while ((p_flow = TAILQ_FIRST(&pf->flow_list)) != NULL) {
		ret = ice_flow_destroy(dev, p_flow, error);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to flush SW flows.");
			return -rte_errno;
		}
	}

	return ret;
}