/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"

static int ice_flow_validate(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item pattern[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item pattern[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
        struct rte_flow *flow,
        struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
        struct rte_flow_error *error);

const struct rte_flow_ops ice_flow_ops = {
    .validate = ice_flow_validate,
    .create = ice_flow_create,
    .destroy = ice_flow_destroy,
    .flush = ice_flow_flush,
};
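
/*
 * Usage sketch (illustrative only, not part of the driver): applications do
 * not call the callbacks above directly. They go through the generic rte_flow
 * API, which resolves this ops table for the port and dispatches to it. A
 * hypothetical single-queue redirect filter could look like the snippet
 * below; ipv4_spec, ipv4_mask, port_id and the queue index are made-up
 * example values.
 *
 *    struct rte_flow_attr attr = { .ingress = 1 };
 *    struct rte_flow_item pattern[] = {
 *        { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *          .spec = &ipv4_spec, .mask = &ipv4_mask },
 *        { .type = RTE_FLOW_ITEM_TYPE_END },
 *    };
 *    struct rte_flow_action_queue queue = { .index = 1 };
 *    struct rte_flow_action actions[] = {
 *        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *        { .type = RTE_FLOW_ACTION_TYPE_END },
 *    };
 *    struct rte_flow_error err;
 *    struct rte_flow *flow = NULL;
 *
 *    if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *        flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */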

static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
        struct rte_flow_error *error)
{
    /* Must be input direction */
    if (!attr->ingress) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                attr, "Only ingress is supported.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->egress) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                attr, "Egress is not supported.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->priority) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                attr, "Priority is not supported.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->group) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                attr, "Group is not supported.");
        return -rte_errno;
    }

    return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
    bool is_find;

    while (item->type != RTE_FLOW_ITEM_TYPE_END) {
        if (is_void)
            is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
        else
            is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
        if (is_find)
            break;
        item++;
    }
    return item;
}

/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
        const struct rte_flow_item *pattern)
{
    uint32_t cpy_count = 0;
    const struct rte_flow_item *pb = pattern, *pe = pattern;

    for (;;) {
        /* Find a non-void item first */
        pb = ice_find_first_item(pb, false);
        if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
            pe = pb;
            break;
        }

        /* Find a void item */
        pe = ice_find_first_item(pb + 1, true);

        cpy_count = pe - pb;
        rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

        items += cpy_count;

        if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
            pb = pe;
            break;
        }

        pb = pe + 1;
    }
    /* Copy the END item. */
    rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
        const struct rte_flow_item *pattern)
{
    const struct rte_flow_item *item = pattern;

    while ((*item_array == item->type) &&
           (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
        item_array++;
        item++;
    }

    return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
            item->type == RTE_FLOW_ITEM_TYPE_END);
}
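
/*
 * For reference, each entry of ice_supported_patterns pairs an item-type
 * array with the input-set bits (sw_fields) the pattern is allowed to use.
 * A hypothetical entry for an ETH/IPV4/UDP pattern might look like the
 * sketch below; the exact array names and sw_fields values used by the
 * driver may differ.
 *
 *    static enum rte_flow_item_type pattern_eth_ipv4_udp[] = {
 *        RTE_FLOW_ITEM_TYPE_ETH,
 *        RTE_FLOW_ITEM_TYPE_IPV4,
 *        RTE_FLOW_ITEM_TYPE_UDP,
 *        RTE_FLOW_ITEM_TYPE_END,
 *    };
 *
 *    ... and in ice_supported_patterns:
 *    { pattern_eth_ipv4_udp,
 *      ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST |
 *      ICE_INSET_SRC_PORT | ICE_INSET_DST_PORT },
 */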

static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
        struct rte_flow_error *error)
{
    uint16_t i = 0;
    uint64_t inset;
    struct rte_flow_item *items; /* used for pattern without VOID items */
    uint32_t item_num = 0; /* non-void item number */

    /* Get the non-void item number of pattern */
    while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
        if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
            item_num++;
        i++;
    }
    item_num++; /* reserve one slot for the END item */

    items = rte_zmalloc("ice_pattern",
            item_num * sizeof(struct rte_flow_item), 0);
    if (!items) {
        rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                NULL, "No memory for PMD internal items.");
        return ICE_INSET_NONE;
    }

    ice_pattern_skip_void_item(items, pattern);

    for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
        if (ice_match_pattern(ice_supported_patterns[i].items,
                items)) {
            inset = ice_supported_patterns[i].sw_fields;
            rte_free(items);
            return inset;
        }

    rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
            pattern, "Unsupported pattern");
    rte_free(items);
    return ICE_INSET_NONE;
}
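
/*
 * Walk the pattern and translate every field that carries an all-ones mask
 * into the corresponding ICE_INSET_* bit. The first IP header and the first
 * L4 header in the pattern are treated as outer headers; any later IP/L4
 * header (i.e. the inner header of a VXLAN/NVGRE tunnel pattern) maps to the
 * ICE_INSET_TUN_* bits instead. Returns the accumulated bit mask, or
 * ICE_INSET_NONE if an item or mask is invalid.
 */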
static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
        struct rte_flow_error *error)
{
    const struct rte_flow_item *item = pattern;
    const struct rte_flow_item_eth *eth_spec, *eth_mask;
    const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
    const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
    const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
    const struct rte_flow_item_udp *udp_spec, *udp_mask;
    const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
    const struct rte_flow_item_icmp *icmp_mask;
    const struct rte_flow_item_icmp6 *icmp6_mask;
    const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
    const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
    enum rte_flow_item_type item_type;
    uint8_t ipv6_addr_mask[16] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
    uint64_t input_set = ICE_INSET_NONE;
    bool outer_ip = true;
    bool outer_l4 = true;

    for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "Range is not supported");
            return ICE_INSET_NONE;
        }
        item_type = item->type;
        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
            eth_spec = item->spec;
            eth_mask = item->mask;

            if (eth_spec && eth_mask) {
                if (rte_is_broadcast_ether_addr(&eth_mask->src))
                    input_set |= ICE_INSET_SMAC;
                if (rte_is_broadcast_ether_addr(&eth_mask->dst))
                    input_set |= ICE_INSET_DMAC;
                if (eth_mask->type == RTE_BE16(0xffff))
                    input_set |= ICE_INSET_ETHERTYPE;
            }
            break;

        case RTE_FLOW_ITEM_TYPE_IPV4:
            ipv4_spec = item->spec;
            ipv4_mask = item->mask;

            if (!(ipv4_spec && ipv4_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid IPv4 spec or mask.");
                return ICE_INSET_NONE;
            }

            /* Check IPv4 mask and update input set */
            if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid IPv4 mask.");
                return ICE_INSET_NONE;
            }

            if (outer_ip) {
                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                    input_set |= ICE_INSET_IPV4_SRC;
                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                    input_set |= ICE_INSET_IPV4_DST;
                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                    input_set |= ICE_INSET_IPV4_TOS;
                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                    input_set |= ICE_INSET_IPV4_TTL;
                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                    input_set |= ICE_INSET_IPV4_PROTO;
                outer_ip = false;
            } else {
                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                    input_set |= ICE_INSET_TUN_IPV4_SRC;
                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                    input_set |= ICE_INSET_TUN_IPV4_DST;
                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                    input_set |= ICE_INSET_TUN_IPV4_TTL;
                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                    input_set |= ICE_INSET_TUN_IPV4_PROTO;
            }
            break;

        case RTE_FLOW_ITEM_TYPE_IPV6:
            ipv6_spec = item->spec;
            ipv6_mask = item->mask;

            if (!(ipv6_spec && ipv6_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid IPv6 spec or mask");
                return ICE_INSET_NONE;
            }

            if (ipv6_mask->hdr.payload_len ||
                ipv6_mask->hdr.vtc_flow) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid IPv6 mask");
                return ICE_INSET_NONE;
            }

            if (outer_ip) {
                if (!memcmp(ipv6_mask->hdr.src_addr,
                            ipv6_addr_mask,
                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                    input_set |= ICE_INSET_IPV6_SRC;
                if (!memcmp(ipv6_mask->hdr.dst_addr,
                            ipv6_addr_mask,
                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                    input_set |= ICE_INSET_IPV6_DST;
                if (ipv6_mask->hdr.proto == UINT8_MAX)
                    input_set |= ICE_INSET_IPV6_PROTO;
                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                    input_set |= ICE_INSET_IPV6_HOP_LIMIT;
                outer_ip = false;
            } else {
                if (!memcmp(ipv6_mask->hdr.src_addr,
                            ipv6_addr_mask,
                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                    input_set |= ICE_INSET_TUN_IPV6_SRC;
                if (!memcmp(ipv6_mask->hdr.dst_addr,
                            ipv6_addr_mask,
                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                    input_set |= ICE_INSET_TUN_IPV6_DST;
                if (ipv6_mask->hdr.proto == UINT8_MAX)
                    input_set |= ICE_INSET_TUN_IPV6_PROTO;
                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                    input_set |= ICE_INSET_TUN_IPV6_TTL;
            }
            break;

        case RTE_FLOW_ITEM_TYPE_UDP:
            udp_spec = item->spec;
            udp_mask = item->mask;

            if (!(udp_spec && udp_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid UDP mask");
                return ICE_INSET_NONE;
            }

            /* Check UDP mask and update input set */
            if (udp_mask->hdr.dgram_len ||
                udp_mask->hdr.dgram_cksum) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid UDP mask");
                return ICE_INSET_NONE;
            }

            if (outer_l4) {
                if (udp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= ICE_INSET_SRC_PORT;
                if (udp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= ICE_INSET_DST_PORT;
                outer_l4 = false;
            } else {
                if (udp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= ICE_INSET_TUN_SRC_PORT;
                if (udp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= ICE_INSET_TUN_DST_PORT;
            }
            break;

        case RTE_FLOW_ITEM_TYPE_TCP:
            tcp_spec = item->spec;
            tcp_mask = item->mask;

            if (!(tcp_spec && tcp_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid TCP mask");
                return ICE_INSET_NONE;
            }

            /* Check TCP mask and update input set */
            if (tcp_mask->hdr.sent_seq ||
                tcp_mask->hdr.recv_ack ||
                tcp_mask->hdr.data_off ||
                tcp_mask->hdr.tcp_flags ||
                tcp_mask->hdr.rx_win ||
                tcp_mask->hdr.cksum ||
                tcp_mask->hdr.tcp_urp) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid TCP mask");
                return ICE_INSET_NONE;
            }

            if (outer_l4) {
                if (tcp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= ICE_INSET_SRC_PORT;
                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= ICE_INSET_DST_PORT;
                outer_l4 = false;
            } else {
                if (tcp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= ICE_INSET_TUN_SRC_PORT;
                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= ICE_INSET_TUN_DST_PORT;
            }
            break;

        case RTE_FLOW_ITEM_TYPE_SCTP:
            sctp_spec = item->spec;
            sctp_mask = item->mask;

            if (!(sctp_spec && sctp_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid SCTP mask");
                return ICE_INSET_NONE;
            }

            /* Check SCTP mask and update input set */
            if (sctp_mask->hdr.cksum) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid SCTP mask");
                return ICE_INSET_NONE;
            }

            if (outer_l4) {
                if (sctp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= ICE_INSET_SRC_PORT;
                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= ICE_INSET_DST_PORT;
                outer_l4 = false;
            } else {
                if (sctp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= ICE_INSET_TUN_SRC_PORT;
                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= ICE_INSET_TUN_DST_PORT;
            }
            break;

        case RTE_FLOW_ITEM_TYPE_ICMP:
            icmp_mask = item->mask;
            if (icmp_mask->hdr.icmp_code ||
                icmp_mask->hdr.icmp_cksum ||
                icmp_mask->hdr.icmp_ident ||
                icmp_mask->hdr.icmp_seq_nb) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid ICMP mask");
                return ICE_INSET_NONE;
            }

            if (icmp_mask->hdr.icmp_type == UINT8_MAX)
                input_set |= ICE_INSET_ICMP;
            break;

        case RTE_FLOW_ITEM_TYPE_ICMP6:
            icmp6_mask = item->mask;
            if (icmp6_mask->code ||
                icmp6_mask->checksum) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid ICMP6 mask");
                return ICE_INSET_NONE;
            }

            if (icmp6_mask->type == UINT8_MAX)
                input_set |= ICE_INSET_ICMP6;
            break;

        case RTE_FLOW_ITEM_TYPE_VXLAN:
            vxlan_spec = item->spec;
            vxlan_mask = item->mask;
            /* Check if the VXLAN item is used to describe the protocol.
             * If yes, both spec and mask should be NULL.
             * If no, both spec and mask shouldn't be NULL.
             */
            if ((!vxlan_spec && vxlan_mask) ||
                (vxlan_spec && !vxlan_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid VXLAN item");
                return ICE_INSET_NONE;
            }
            break;

        case RTE_FLOW_ITEM_TYPE_NVGRE:
            nvgre_spec = item->spec;
            nvgre_mask = item->mask;
            /* Check if the NVGRE item is used to describe the protocol.
             * If yes, both spec and mask should be NULL.
             * If no, both spec and mask shouldn't be NULL.
             */
            if ((!nvgre_spec && nvgre_mask) ||
                (nvgre_spec && !nvgre_mask)) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item,
                        "Invalid NVGRE item");
                return ICE_INSET_NONE;
            }
            break;

        default:
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item,
                    "Invalid item; mask does not exist");
            break;
        }
    }

    return input_set;
}

static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
        uint64_t inset, struct rte_flow_error *error)
{
    uint64_t fields;

    /* get valid field */
    fields = ice_get_flow_field(pattern, error);
    if (!fields || fields & (~inset)) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                pattern,
                "Invalid input set");
        return -rte_errno;
    }

    return 0;
}
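
/*
 * Worked example (illustrative): for a pattern ETH / IPV4 / UDP whose masks
 * fully cover only the IPv4 addresses and the UDP destination port,
 * ice_get_flow_field() returns
 * ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DST_PORT.
 * The check above passes only if every one of those bits is also present in
 * the sw_fields mask of the pattern-table entry matched earlier; a field
 * outside that mask, or an empty field set, rejects the rule.
 */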

static int ice_flow_valid_action(struct rte_eth_dev *dev,
        const struct rte_flow_action *actions,
        struct rte_flow_error *error)
{
    const struct rte_flow_action_queue *act_q;
    uint16_t queue;

    switch (actions->type) {
    case RTE_FLOW_ACTION_TYPE_QUEUE:
        act_q = actions->conf;
        queue = act_q->index;
        if (queue >= dev->data->nb_rx_queues) {
            rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ACTION,
                    actions, "Invalid queue ID for"
                    " switch filter.");
            return -rte_errno;
        }
        break;
    case RTE_FLOW_ACTION_TYPE_DROP:
        break;
    default:
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION, actions,
                "Invalid action.");
        return -rte_errno;
    }

    return 0;
}

static int
ice_flow_validate(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item pattern[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error)
{
    uint64_t inset = 0;
    int ret = ICE_ERR_NOT_SUPPORTED;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ATTR,
                NULL, "NULL attribute.");
        return -rte_errno;
    }

    ret = ice_flow_valid_attr(attr, error);
    if (ret)
        return ret;

    inset = ice_flow_valid_pattern(pattern, error);
    if (!inset)
        return -rte_errno;

    ret = ice_flow_valid_inset(pattern, inset, error);
    if (ret)
        return ret;

    ret = ice_flow_valid_action(dev, actions, error);
    if (ret)
        return ret;

    return 0;
}

static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item pattern[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error)
{
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct rte_flow *flow = NULL;
    int ret;

    flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
    if (!flow) {
        rte_flow_error_set(error, ENOMEM,
                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                "Failed to allocate memory");
        return flow;
    }

    ret = ice_flow_validate(dev, attr, pattern, actions, error);
    if (ret < 0)
        goto free_flow;

    ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
    if (ret)
        goto free_flow;

    TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
    return flow;

free_flow:
    rte_flow_error_set(error, -ret,
            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
            "Failed to create flow.");
    rte_free(flow);
    return NULL;
}

static int
ice_flow_destroy(struct rte_eth_dev *dev,
        struct rte_flow *flow,
        struct rte_flow_error *error)
{
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    int ret = 0;

    ret = ice_destroy_switch_filter(pf, flow, error);

    if (!ret) {
        TAILQ_REMOVE(&pf->flow_list, flow, node);
        rte_free(flow);
    } else {
        rte_flow_error_set(error, -ret,
                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                "Failed to destroy flow.");
    }

    return ret;
}

static int
ice_flow_flush(struct rte_eth_dev *dev,
        struct rte_flow_error *error)
{
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct rte_flow *p_flow;
    int ret = 0;

    /* ice_flow_destroy() unlinks and frees the entry, so always take the
     * current list head rather than iterating with TAILQ_FOREACH, which
     * would dereference freed memory.
     */
    while ((p_flow = TAILQ_FIRST(&pf->flow_list)) != NULL) {
        ret = ice_flow_destroy(dev, p_flow, error);
        if (ret) {
            rte_flow_error_set(error, -ret,
                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                    "Failed to flush SW flows.");
            return -rte_errno;
        }
    }

    return ret;
}