net/ixgbe: parse n-tuple filter
author     Wei Zhao <wei.zhao1@intel.com>
           Fri, 13 Jan 2017 08:13:05 +0000 (16:13 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Tue, 17 Jan 2017 18:41:43 +0000 (19:41 +0100)
Add a rule validation function that checks whether a rule is an
n-tuple rule and, if so, extracts the n-tuple filter info.
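
For reference, a minimal sketch of a rule this parser accepts,
validated through the generic flow API (illustrative only, not part
of this patch: port_id, the addresses, ports and queue index are
made up, and the usual rte_flow.h/rte_byteorder.h includes plus a
configured port are assumed):

    uint8_t port_id = 0;                    /* assumed configured */
    struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
    /* IPv4: src 192.168.1.20, dst 192.167.3.50, proto 17 (UDP) */
    struct rte_flow_item_ipv4 ip_spec = {
        .hdr = {
            .src_addr = rte_cpu_to_be_32(0xC0A80114),
            .dst_addr = rte_cpu_to_be_32(0xC0A70332),
            .next_proto_id = 17,
        },
    };
    struct rte_flow_item_ipv4 ip_mask = {
        .hdr = {
            .src_addr = UINT32_MAX,
            .dst_addr = UINT32_MAX,
            .next_proto_id = UINT8_MAX,
        },
    };
    /* ports in flow items are big endian */
    struct rte_flow_item_udp udp_spec = {
        .hdr = {
            .src_port = rte_cpu_to_be_16(80),
            .dst_port = rte_cpu_to_be_16(80),
        },
    };
    struct rte_flow_item_udp udp_mask = {
        .hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask NULL */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ip_spec, .mask = &ip_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    int ret = rte_flow_validate(port_id, &attr, pattern,
                                actions, &err);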

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Acked-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Wei Dai <wei.dai@intel.com>
drivers/net/ixgbe/ixgbe_flow.c

index 1499391..417728b 100644
 
 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                                       const struct rte_flow_item pattern[],
+                                       const struct rte_flow_action actions[],
+                                       struct rte_eth_ntuple_filter *filter,
+                                       struct rte_flow_error *error);
+static int
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                                       const struct rte_flow_item pattern[],
+                                       const struct rte_flow_action actions[],
+                                       struct rte_eth_ntuple_filter *filter,
+                                       struct rte_flow_error *error);
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error);
 
 const struct rte_flow_ops ixgbe_flow_ops = {
-       NULL,
+       ixgbe_flow_validate,
        NULL,
        NULL,
        ixgbe_flow_flush,
        NULL,
 };
 
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
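+/* Advance to the next non-VOID entry of a pattern or action list. */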
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)             \
+       do {                                                    \
+               item = pattern + index;                         \
+               while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
+                       index++;                                \
+                       item = pattern + index;                 \
+               }                                               \
+       } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)               \
+       do {                                                    \
+               act = actions + index;                          \
+               while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+                       index++;                                \
+                       act = actions + index;                  \
+               }                                               \
+       } while (0)
+
+/**
+ * Please be aware there's an assumption for all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because the pattern is used to describe the packets,
+ * the packets should normally be in network order.
+ */
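+/**
+ * For example (illustrative only, not used by the code): a TCP/UDP
+ * port of 80 in an item spec is expected as rte_cpu_to_be_16(80),
+ * while attr->priority is read directly in CPU order.
+ */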
+
+/**
+ * Parse the rule to see if it is an n-tuple rule.
+ * And get the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM       Spec                     Mask
+ * ETH        NULL                     NULL
+ * IPV4       src_addr 192.168.1.20    0xFFFFFFFF
+ *            dst_addr 192.167.3.50    0xFFFFFFFF
+ *            next_proto_id 17         0xFF
+ * UDP/TCP    src_port 80              0xFFFF
+ *            dst_port 80              0xFFFF
+ * END
+ * All other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                        const struct rte_flow_item pattern[],
+                        const struct rte_flow_action actions[],
+                        struct rte_eth_ntuple_filter *filter,
+                        struct rte_flow_error *error)
+{
+       const struct rte_flow_item *item;
+       const struct rte_flow_action *act;
+       const struct rte_flow_item_ipv4 *ipv4_spec;
+       const struct rte_flow_item_ipv4 *ipv4_mask;
+       const struct rte_flow_item_tcp *tcp_spec;
+       const struct rte_flow_item_tcp *tcp_mask;
+       const struct rte_flow_item_udp *udp_spec;
+       const struct rte_flow_item_udp *udp_mask;
+       uint32_t index;
+
+       if (!pattern) {
+               rte_flow_error_set(error,
+                       EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                       NULL, "NULL pattern.");
+               return -rte_errno;
+       }
+
+       if (!actions) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+                                  NULL, "NULL action.");
+               return -rte_errno;
+       }
+       if (!attr) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR,
+                                  NULL, "NULL attribute.");
+               return -rte_errno;
+       }
+
+       /* parse pattern */
+       index = 0;
+
+       /* the first not void item can be MAC or IPv4 */
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+       if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+       /* Skip Ethernet */
+       if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+               /* Not supported last point for range */
+               if (item->last) {
+                       rte_flow_error_set(error,
+                               EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               /* if the first item is MAC, the content should be NULL */
+               if (item->spec || item->mask) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+               /* check if the next not void item is IPv4 */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+                       rte_flow_error_set(error,
+                               EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* get the IPv4 info */
+       if (!item->spec || !item->mask) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Invalid ntuple mask");
+               return -rte_errno;
+       }
+       /* Not supported last point for range */
+       if (item->last) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       item, "Not supported last point for range");
+               return -rte_errno;
+       }
+
+       ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+       /**
+        * Only support src & dst addresses and protocol;
+        * other fields should be masked out.
+        */
+       if (ipv4_mask->hdr.version_ihl ||
+           ipv4_mask->hdr.type_of_service ||
+           ipv4_mask->hdr.total_length ||
+           ipv4_mask->hdr.packet_id ||
+           ipv4_mask->hdr.fragment_offset ||
+           ipv4_mask->hdr.time_to_live ||
+           ipv4_mask->hdr.hdr_checksum) {
+               rte_flow_error_set(error,
+                       EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+       filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+       filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+       ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+       filter->dst_ip = ipv4_spec->hdr.dst_addr;
+       filter->src_ip = ipv4_spec->hdr.src_addr;
+       filter->proto  = ipv4_spec->hdr.next_proto_id;
+
+       /* check if the next not void item is TCP or UDP */
+       index++;
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+           item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       /* get the TCP/UDP info */
+       if (!item->spec || !item->mask) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Invalid ntuple mask");
+               return -rte_errno;
+       }
+
+       /* Not supported last point for range */
+       if (item->last) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       item, "Not supported last point for range");
+               return -rte_errno;
+       }
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+               tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+               /**
+                * Only support src & dst ports and TCP flags;
+                * other fields should be masked out.
+                */
+               if (tcp_mask->hdr.sent_seq ||
+                   tcp_mask->hdr.recv_ack ||
+                   tcp_mask->hdr.data_off ||
+                   tcp_mask->hdr.rx_win ||
+                   tcp_mask->hdr.cksum ||
+                   tcp_mask->hdr.tcp_urp) {
+                       memset(filter, 0,
+                               sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+
+               filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+               filter->src_port_mask  = tcp_mask->hdr.src_port;
+               if (tcp_mask->hdr.tcp_flags == 0xFF) {
+                       filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+               } else if (!tcp_mask->hdr.tcp_flags) {
+                       filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+               } else {
+                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+
+               tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+               filter->dst_port  = tcp_spec->hdr.dst_port;
+               filter->src_port  = tcp_spec->hdr.src_port;
+               filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+       } else {
+               udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+               /**
+                * Only support src & dst ports;
+                * other fields should be masked out.
+                */
+               if (udp_mask->hdr.dgram_len ||
+                   udp_mask->hdr.dgram_cksum) {
+                       memset(filter, 0,
+                               sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+
+               filter->dst_port_mask = udp_mask->hdr.dst_port;
+               filter->src_port_mask = udp_mask->hdr.src_port;
+
+               udp_spec = (const struct rte_flow_item_udp *)item->spec;
+               filter->dst_port = udp_spec->hdr.dst_port;
+               filter->src_port = udp_spec->hdr.src_port;
+       }
+
+       /* check if the next not void item is END */
+       index++;
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       /* parse action */
+       index = 0;
+
+       /**
+        * n-tuple only supports forwarding,
+        * check if the first not void action is QUEUE.
+        */
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+       filter->queue =
+               ((const struct rte_flow_action_queue *)act->conf)->index;
+
+       /* check if the next not void item is END */
+       index++;
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       /* parse attr */
+       /* must be input direction */
+       if (!attr->ingress) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+                                  attr, "Only ingress is supported.");
+               return -rte_errno;
+       }
+
+       /* egress is not supported */
+       if (attr->egress) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+                                  attr, "Egress is not supported.");
+               return -rte_errno;
+       }
+
+       if (attr->priority > 0xFFFF) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                                  attr, "Invalid priority.");
+               return -rte_errno;
+       }
+       filter->priority = (uint16_t)attr->priority;
+       if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+           attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
+               filter->priority = 1;
+
+       return 0;
+}
+
+/* a dedicated function for ixgbe because the supported flags are specific */
+static int
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                         const struct rte_flow_item pattern[],
+                         const struct rte_flow_action actions[],
+                         struct rte_eth_ntuple_filter *filter,
+                         struct rte_flow_error *error)
+{
+       int ret;
+
+       ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+       if (ret)
+               return ret;
+
+       /* ixgbe doesn't support TCP flags. */
+       if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  NULL, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       /* ixgbe supports only a limited priority range. */
+       if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+           filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "Priority not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+               filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+               filter->priority < IXGBE_5TUPLE_MIN_PRI) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       NULL, "Queue or priority out of range");
+               return -rte_errno;
+       }
+
+       /* fixed value for ixgbe */
+       filter->flags = RTE_5TUPLE_FLAGS;
+       return 0;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checks the format; it doesn't guarantee that the rule can
+ * be programmed into the HW, because there may not be enough room
+ * for the rule.
+ */
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error)
+{
+       struct rte_eth_ntuple_filter ntuple_filter;
+       int ret;
+
+       memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+       ret = ixgbe_parse_ntuple_filter(attr, pattern,
+                               actions, &ntuple_filter, error);
+       return ret;
+}
+
 /*  Destroy all flow rules associated with a port on ixgbe. */
 static int
 ixgbe_flow_flush(struct rte_eth_dev *dev,