net: add rte prefix to IP structure
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
index bdf2885..2caf314 100644
--- a/drivers/net/qede/qede_filter.c
+++ b/drivers/net/qede/qede_filter.c
@@ -8,6 +8,7 @@
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_errno.h>
+#include <rte_flow_driver.h>
 
 #include "qede_ethdev.h"
 
@@ -220,7 +221,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                /* fill the common ip header */
-               arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
+               arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv4;
                arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
                arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
                arfs->tuple.ip_proto = next_proto[input->flow_type];
@@ -236,7 +237,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-               arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
+               arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv6;
                arfs->tuple.ip_proto = next_proto[input->flow_type];
                rte_memcpy(arfs->tuple.dst_ipv6,
                           &input->flow.ipv6_flow.dst_ip,
@@ -456,31 +457,31 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
-       struct ipv4_hdr *ip;
-       struct ipv6_hdr *ip6;
+       struct rte_ipv4_hdr *ip;
+       struct rte_ipv6_hdr *ip6;
        struct udp_hdr *udp;
        struct tcp_hdr *tcp;
        uint16_t len;
 
        raw_pkt = (uint8_t *)buff;
 
-       len =  2 * sizeof(struct ether_addr);
-       raw_pkt += 2 * sizeof(struct ether_addr);
+       len =  2 * sizeof(struct rte_ether_addr);
+       raw_pkt += 2 * sizeof(struct rte_ether_addr);
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);
 
        *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
        switch (arfs->tuple.eth_proto) {
-       case ETHER_TYPE_IPv4:
-               ip = (struct ipv4_hdr *)raw_pkt;
+       case RTE_ETHER_TYPE_IPv4:
+               ip = (struct rte_ipv4_hdr *)raw_pkt;
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
-               ip->total_length = sizeof(struct ipv4_hdr);
+               ip->total_length = sizeof(struct rte_ipv4_hdr);
                ip->next_proto_id = arfs->tuple.ip_proto;
                ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
                ip->dst_addr = arfs->tuple.dst_ipv4;
                ip->src_addr = arfs->tuple.src_ipv4;
-               len += sizeof(struct ipv4_hdr);
+               len += sizeof(struct rte_ipv4_hdr);
                params->ipv4 = true;
 
                raw_pkt = (uint8_t *)buff;
@@ -505,8 +506,8 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
                        params->tcp = true;
                }
                break;
-       case ETHER_TYPE_IPv6:
-               ip6 = (struct ipv6_hdr *)raw_pkt;
+       case RTE_ETHER_TYPE_IPv6:
+               ip6 = (struct rte_ipv6_hdr *)raw_pkt;
                ip6->proto = arfs->tuple.ip_proto;
                ip6->vtc_flow =
                        rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
@@ -515,7 +516,7 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
                           IPV6_ADDR_LEN);
                rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
                           IPV6_ADDR_LEN);
-               len += sizeof(struct ipv6_hdr);
+               len += sizeof(struct rte_ipv6_hdr);
                params->ipv6 = true;
 
                raw_pkt = (uint8_t *)buff;
@@ -991,25 +992,25 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
        break;
        case ECORE_FILTER_MAC:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-                      ETHER_ADDR_LEN);
+                      RTE_ETHER_ADDR_LEN);
        break;
        case ECORE_FILTER_INNER_MAC:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                      ETHER_ADDR_LEN);
+                      RTE_ETHER_ADDR_LEN);
        break;
        case ECORE_FILTER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
+                       RTE_ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
        break;
        case ECORE_FILTER_INNER_MAC_VNI_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
+                       RTE_ETHER_ADDR_LEN);
                ucast->vni = conf->tenant_id;
        break;
        case ECORE_FILTER_INNER_PAIR:
                memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
+                       RTE_ETHER_ADDR_LEN);
                ucast->vlan = conf->inner_vlan;
        break;
        default:
@@ -1159,6 +1160,329 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
+static int
+qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       struct rte_flow_error *error)
+{
+       if (attr == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+                                  "NULL attribute");
+               return -rte_errno;
+       }
+
+       if (attr->group != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+                                  "Groups are not supported");
+               return -rte_errno;
+       }
+
+       if (attr->priority != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+                                  "Priorities are not supported");
+               return -rte_errno;
+       }
+
+       if (attr->egress != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+                                  "Egress is not supported");
+               return -rte_errno;
+       }
+
+       if (attr->transfer != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+                                  "Transfer is not supported");
+               return -rte_errno;
+       }
+
+       if (attr->ingress == 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+                                  "Only ingress is supported");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
+static int
+qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
+                       const struct rte_flow_item pattern[],
+                       struct rte_flow_error *error,
+                       struct rte_flow *flow)
+{
+       bool l3 = false, l4 = false;
+
+       if (pattern == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+                                  "NULL pattern");
+               return -rte_errno;
+       }
+
+       for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+               if (!pattern->spec) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item spec not defined");
+                       return -rte_errno;
+               }
+
+               if (pattern->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item last not supported");
+                       return -rte_errno;
+               }
+
+               if (pattern->mask) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item mask not supported");
+                       return -rte_errno;
+               }
+
+               /* The validation below covers only a 4-tuple flow
+                * (GFT_PROFILE_TYPE_4_TUPLE):
+                * - src and dst L3 addresses (IPv4 or IPv6)
+                * - src and dst L4 ports (TCP or UDP)
+                */
+
+               switch (pattern->type) {
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       l3 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_ipv4 *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
+                               flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
+                               flow->entry.tuple.eth_proto =
+                                       RTE_ETHER_TYPE_IPv4;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       l3 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_ipv6 *spec;
+
+                               spec = pattern->spec;
+                               rte_memcpy(flow->entry.tuple.src_ipv6,
+                                          spec->hdr.src_addr,
+                                          IPV6_ADDR_LEN);
+                               rte_memcpy(flow->entry.tuple.dst_ipv6,
+                                          spec->hdr.dst_addr,
+                                          IPV6_ADDR_LEN);
+                               flow->entry.tuple.eth_proto =
+                                       RTE_ETHER_TYPE_IPv6;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       l4 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_udp *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_port =
+                                               spec->hdr.src_port;
+                               flow->entry.tuple.dst_port =
+                                               spec->hdr.dst_port;
+                               flow->entry.tuple.ip_proto = IPPROTO_UDP;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_TCP:
+                       l4 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_tcp *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_port =
+                                               spec->hdr.src_port;
+                               flow->entry.tuple.dst_port =
+                                               spec->hdr.dst_port;
+                               flow->entry.tuple.ip_proto = IPPROTO_TCP;
+                       }
+
+                       break;
+               default:
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
+                       return -rte_errno;
+               }
+       }
+
+       if (!(l3 && l4)) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  pattern,
+                                  "Item types need to have both L3 and L4 protocols");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
+static int
+qede_flow_parse_actions(struct rte_eth_dev *dev,
+                       const struct rte_flow_action actions[],
+                       struct rte_flow_error *error,
+                       struct rte_flow *flow)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+       const struct rte_flow_action_queue *queue;
+
+       if (actions == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+                                  "NULL actions");
+               return -rte_errno;
+       }
+
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+                       queue = actions->conf;
+
+                       if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ACTION,
+                                                  actions,
+                                                  "Bad QUEUE action");
+                               return -rte_errno;
+                       }
+
+                       if (flow)
+                               flow->entry.rx_queue = queue->index;
+
+                       break;
+
+               default:
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          actions,
+                                          "Action is not supported - only ACTION_TYPE_QUEUE supported");
+                       return -rte_errno;
+               }
+       }
+
+       return 0;
+}
+
+static int
+qede_flow_parse(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item patterns[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error,
+               struct rte_flow *flow)
+{
+       int rc = 0;
+
+       rc = qede_flow_validate_attr(dev, attr, error);
+       if (rc)
+               return rc;
+
+       /* Parse and validate the item pattern and actions.
+        * The given item list and actions are translated into the qede
+        * PMD specific arfs structure.
+        */
+       rc = qede_flow_parse_pattern(dev, patterns, error, flow);
+       if (rc)
+               return rc;
+
+       rc = qede_flow_parse_actions(dev, actions, error, flow);
+
+       return rc;
+}
+
+static int
+qede_flow_validate(struct rte_eth_dev *dev,
+                  const struct rte_flow_attr *attr,
+                  const struct rte_flow_item patterns[],
+                  const struct rte_flow_action actions[],
+                  struct rte_flow_error *error)
+{
+       return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
+}
+
+static struct rte_flow *
+qede_flow_create(struct rte_eth_dev *dev,
+                const struct rte_flow_attr *attr,
+                const struct rte_flow_item pattern[],
+                const struct rte_flow_action actions[],
+                struct rte_flow_error *error)
+{
+       struct rte_flow *flow = NULL;
+       int rc;
+
+       flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
+       if (flow == NULL) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "Failed to allocate memory");
+               return NULL;
+       }
+
+       rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
+       if (rc < 0) {
+               rte_free(flow);
+               return NULL;
+       }
+
+       rc = qede_config_arfs_filter(dev, &flow->entry, true);
+       if (rc < 0) {
+               rte_flow_error_set(error, -rc,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to configure flow filter");
+               rte_free(flow);
+               return NULL;
+       }
+
+       return flow;
+}
+
+static int
+qede_flow_destroy(struct rte_eth_dev *eth_dev,
+                 struct rte_flow *flow,
+                 struct rte_flow_error *error)
+{
+       int rc = 0;
+
+       rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
+       if (rc < 0)
+               rte_flow_error_set(error, -rc,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to delete flow filter");
+
+       /* Free the handle on success as well, otherwise it leaks;
+        * destroy is called only once on a flow regardless of the
+        * outcome.
+        */
+       rte_free(flow);
+
+       return rc;
+}
+
+const struct rte_flow_ops qede_flow_ops = {
+       .validate = qede_flow_validate,
+       .create = qede_flow_create,
+       .destroy = qede_flow_destroy,
+};
+
 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                         enum rte_filter_type filter_type,
                         enum rte_filter_op filter_op,
@@ -1195,6 +1519,17 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                return qede_fdir_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_NTUPLE:
                return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+       case RTE_ETH_FILTER_GENERIC:
+               if (ECORE_IS_CMT(edev)) {
+                       DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+                       return -ENOTSUP;
+               }
+
+               if (filter_op != RTE_ETH_FILTER_GET)
+                       return -EINVAL;
+
+               *(const void **)arg = &qede_flow_ops;
+               return 0;
        case RTE_ETH_FILTER_MACVLAN:
        case RTE_ETH_FILTER_ETHERTYPE:
        case RTE_ETH_FILTER_FLEXIBLE:
@@ -1210,5 +1545,3 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
 
        return 0;
 }
-
-/* RTE_FLOW */
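
Usage sketch (not part of the patch): the snippet below shows how an
application might exercise the new parser through the generic flow API,
assuming an initialized qede port and a valid Rx queue index; the helper
name and its parameters are illustrative only. Note that the parser
requires a spec on every item and rejects last/mask, and that addresses
and ports are passed in network byte order, since qede_flow_parse_pattern()
copies the spec fields straight into the arfs tuple.

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Hypothetical helper: steer one IPv4/UDP 4-tuple to rx_queue. */
static struct rte_flow *
qede_example_udp_to_queue(uint16_t port_id, uint16_t rx_queue,
			  rte_be32_t src_ip, rte_be32_t dst_ip,
			  rte_be16_t src_port, rte_be16_t dst_port,
			  struct rte_flow_error *error)
{
	/* Only ingress is accepted; group, priority, egress and
	 * transfer must stay zero (see qede_flow_validate_attr()).
	 */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = { .src_addr = src_ip, .dst_addr = dst_ip },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .src_port = src_port, .dst_port = dst_port },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	/* spec only: items carrying last or mask are rejected */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

rte_flow_validate() and rte_flow_create() reach qede_flow_ops through the
RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET branch added to
qede_dev_filter_ctrl() above.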