net/enic: support flow counter action
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index a32e25e..04fc351 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -1,37 +1,11 @@
-/*
- * Copyright (c) 2017, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
  */
 
 #include <errno.h>
+#include <stdint.h>
 #include <rte_log.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_flow_driver.h>
 #include <rte_ether.h>
 #include <rte_ip.h>
 #include "vnic_dev.h"
 #include "vnic_nic.h"
 
-#ifdef RTE_LIBRTE_ENIC_DEBUG_FLOW
 #define FLOW_TRACE() \
-       RTE_LOG(DEBUG, PMD, "%s()\n", __func__)
+       rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
+               "%s()\n", __func__)
 #define FLOW_LOG(level, fmt, args...) \
-       RTE_LOG(level, PMD, fmt, ## args)
-#else
-#define FLOW_TRACE() do { } while (0)
-#define FLOW_LOG(level, fmt, args...) do { } while (0)
-#endif
+       rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
+               fmt "\n", ##args)
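
With the compile-time RTE_LIBRTE_ENIC_DEBUG_FLOW gate gone, tracing is always compiled in and controlled at runtime through the dynamic log type. A minimal sketch of the registration this code assumes (the real one lives elsewhere in the PMD, e.g. enic_ethdev.c; names follow DPDK convention):

	int enicpmd_logtype_flow;

	RTE_INIT(enicpmd_flow_init_log)
	{
		/* tunable at runtime, e.g. --log-level=pmd.net.enic.flow:debug */
		enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
		if (enicpmd_logtype_flow >= 0)
			rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_INFO);
	}
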
 
 /** Info about how to copy items into enic filters. */
 struct enic_items {
@@ -89,6 +60,9 @@ struct enic_action_cap {
 };
 
 /* Forward declarations */
+static enic_copy_item_fn enic_copy_item_ipv4_v1;
+static enic_copy_item_fn enic_copy_item_udp_v1;
+static enic_copy_item_fn enic_copy_item_tcp_v1;
 static enic_copy_item_fn enic_copy_item_eth_v2;
 static enic_copy_item_fn enic_copy_item_vlan_v2;
 static enic_copy_item_fn enic_copy_item_ipv4_v2;
@@ -101,6 +75,36 @@ static enic_copy_item_fn enic_copy_item_vxlan_v2;
 static copy_action_fn enic_copy_action_v1;
 static copy_action_fn enic_copy_action_v2;
 
+/**
+ * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
+ * is supported.
+ */
+static const struct enic_items enic_items_v1[] = {
+       [RTE_FLOW_ITEM_TYPE_IPV4] = {
+               .copy_item = enic_copy_item_ipv4_v1,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_UDP] = {
+               .copy_item = enic_copy_item_udp_v1,
+               .valid_start_item = 0,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_IPV4,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_TCP] = {
+               .copy_item = enic_copy_item_tcp_v1,
+               .valid_start_item = 0,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_IPV4,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+};
+
 /**
  * NICs have Advanced Filters capability but they are disabled. This means
  * that layer 3 must be specified.
@@ -252,6 +256,9 @@ static const struct enic_items enic_items_v3[] = {
 
 /** Filtering capabilities indexed by the NIC's supported filter type. */
 static const struct enic_filter_cap enic_filter_cap[] = {
+       [FILTER_IPV4_5TUPLE] = {
+               .item_info = enic_items_v1,
+       },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
        },
@@ -267,10 +274,27 @@ static const enum rte_flow_action_type enic_supported_actions_v1[] = {
 };
 
 /** Supported actions for newer NICs */
-static const enum rte_flow_action_type enic_supported_actions_v2[] = {
+static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
+       RTE_FLOW_ACTION_TYPE_QUEUE,
+       RTE_FLOW_ACTION_TYPE_MARK,
+       RTE_FLOW_ACTION_TYPE_FLAG,
+       RTE_FLOW_ACTION_TYPE_END,
+};
+
+static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
+       RTE_FLOW_ACTION_TYPE_DROP,
+       RTE_FLOW_ACTION_TYPE_END,
+};
+
+static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
+       RTE_FLOW_ACTION_TYPE_QUEUE,
+       RTE_FLOW_ACTION_TYPE_MARK,
+       RTE_FLOW_ACTION_TYPE_FLAG,
+       RTE_FLOW_ACTION_TYPE_DROP,
+       RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_END,
 };
 
@@ -280,11 +304,184 @@ static const struct enic_action_cap enic_action_cap[] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
-       [FILTER_ACTION_V2_ALL] = {
-               .actions = enic_supported_actions_v2,
+       [FILTER_ACTION_FILTER_ID_FLAG] = {
+               .actions = enic_supported_actions_v2_id,
+               .copy_fn = enic_copy_action_v2,
+       },
+       [FILTER_ACTION_DROP_FLAG] = {
+               .actions = enic_supported_actions_v2_drop,
+               .copy_fn = enic_copy_action_v2,
+       },
+       [FILTER_ACTION_COUNTER_FLAG] = {
+               .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
 };
+
+static int
+mask_exact_match(const u8 *supported, const u8 *supplied,
+                unsigned int size)
+{
+       unsigned int i;
+       for (i = 0; i < size; i++) {
+               if (supported[i] != supplied[i])
+                       return 0;
+       }
+       return 1;
+}
+
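mask_exact_match() is a plain byte-wise equality test. An equivalent formulation, for reference (the alternate name is hypothetical):

	#include <stdint.h>
	#include <string.h>

	/* Equivalent to the loop above: the masks match only if every
	 * byte of the supplied mask equals the supported one.
	 */
	static int
	mask_exact_match_alt(const uint8_t *supported, const uint8_t *supplied,
			     unsigned int size)
	{
		return memcmp(supported, supplied, size) == 0;
	}
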
+/**
+ * Copy IPv4 item into version 1 NIC filter.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param enic_filter[out]
+ *   Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ *   Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
+                      struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+       const struct rte_flow_item_ipv4 *spec = item->spec;
+       const struct rte_flow_item_ipv4 *mask = item->mask;
+       struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+       struct ipv4_hdr supported_mask = {
+               .src_addr = 0xffffffff,
+               .dst_addr = 0xffffffff,
+       };
+
+       FLOW_TRACE();
+
+       if (*inner_ofst)
+               return ENOTSUP;
+
+       if (!mask)
+               mask = &rte_flow_item_ipv4_mask;
+
+       /* This is an exact match filter, both fields must be set */
+       if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
+               FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
+               return ENOTSUP;
+       }
+
+       /* check that the supplied mask exactly matches capability */
+       if (!mask_exact_match((const u8 *)&supported_mask,
+                             (const u8 *)mask, sizeof(*mask))) {
+               FLOW_LOG(ERR, "IPv4 exact match mask");
+               return ENOTSUP;
+       }
+
+       enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+       enic_5tup->src_addr = spec->hdr.src_addr;
+       enic_5tup->dst_addr = spec->hdr.dst_addr;
+
+       return 0;
+}
+
+/**
+ * Copy UDP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param enic_filter[out]
+ *   Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ *   Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_udp_v1(const struct rte_flow_item *item,
+                     struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+       const struct rte_flow_item_udp *spec = item->spec;
+       const struct rte_flow_item_udp *mask = item->mask;
+       struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+       struct udp_hdr supported_mask = {
+               .src_port = 0xffff,
+               .dst_port = 0xffff,
+       };
+
+       FLOW_TRACE();
+
+       if (*inner_ofst)
+               return ENOTSUP;
+
+       if (!mask)
+               mask = &rte_flow_item_udp_mask;
+
+       /* This is an exact match filter, both ports must be set */
+       if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+               FLOW_LOG(ERR, "UDP exact match src/dst addr");
+               return ENOTSUP;
+       }
+
+       /* check that the supplied mask exactly matches capability */
+       if (!mask_exact_match((const u8 *)&supported_mask,
+                             (const u8 *)mask, sizeof(*mask))) {
+               FLOW_LOG(ERR, "UDP exact match mask");
+               return ENOTSUP;
+       }
+
+       enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+       enic_5tup->src_port = spec->hdr.src_port;
+       enic_5tup->dst_port = spec->hdr.dst_port;
+       enic_5tup->protocol = PROTO_UDP;
+
+       return 0;
+}
+
+/**
+ * Copy TCP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param enic_filter[out]
+ *   Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ *   Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_tcp_v1(const struct rte_flow_item *item,
+                     struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+       const struct rte_flow_item_tcp *spec = item->spec;
+       const struct rte_flow_item_tcp *mask = item->mask;
+       struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+       struct tcp_hdr supported_mask = {
+               .src_port = 0xffff,
+               .dst_port = 0xffff,
+       };
+
+       FLOW_TRACE();
+
+       if (*inner_ofst)
+               return ENOTSUP;
+
+       if (!mask)
+               mask = &rte_flow_item_tcp_mask;
+
+       /* This is an exact match filter, both ports must be set */
+       if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+               FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr");
+               return ENOTSUP;
+       }
+
+       /* check that the supplied mask exactly matches capability */
+       if (!mask_exact_match((const u8 *)&supported_mask,
+                             (const u8 *)mask, sizeof(*mask))) {
+               FLOW_LOG(ERR, "TCP exact match mask");
+               return ENOTSUP;
+       }
+
+       enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+       enic_5tup->src_port = spec->hdr.src_port;
+       enic_5tup->dst_port = spec->hdr.dst_port;
+       enic_5tup->protocol = PROTO_TCP;
+
+       return 0;
+}
+
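For reference, a self-contained sketch of a pattern all three v1 copy functions accept: IPv4 first (the only valid start item), TCP after it, with both addresses and both ports fully specified and exactly masked. The addresses and ports are invented for illustration:

	#include <rte_flow.h>
	#include <rte_byteorder.h>

	static const struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
			.dst_addr = RTE_BE32(0x0a000002), /* 10.0.0.2 */
		},
	};
	static const struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
		},
	};
	static const struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .src_port = RTE_BE16(6000), .dst_port = RTE_BE16(80) },
	};
	static const struct rte_flow_item_tcp tcp_mask = {
		.hdr = { .src_port = RTE_BE16(0xffff), .dst_port = RTE_BE16(0xffff) },
	};
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
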
 /**
  * Copy ETH item into version 2 NIC filter.
  *
@@ -373,16 +570,21 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item,
        if (!spec)
                return 0;
 
-       /* Don't support filtering in tpid */
-       if (mask) {
-               if (mask->tpid != 0)
-                       return ENOTSUP;
-       } else {
+       if (!mask)
                mask = &rte_flow_item_vlan_mask;
-               RTE_ASSERT(mask->tpid == 0);
-       }
 
        if (*inner_ofst == 0) {
+               struct ether_hdr *eth_mask =
+                       (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
+               struct ether_hdr *eth_val =
+                       (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+
+               /* Outer TPID cannot be matched */
+               if (eth_mask->ether_type)
+                       return ENOTSUP;
+               eth_mask->ether_type = mask->inner_type;
+               eth_val->ether_type = spec->inner_type;
+
                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
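
The rework above drops TPID matching and instead folds the VLAN item's inner_type into the L2 ethertype slot, which is why the preceding ETH item must leave ether_type unmasked. A sketch of a VLAN spec/mask under the new semantics (values invented; headers as in the earlier sketch):

	static const struct rte_flow_item_vlan vlan_spec = {
		.tci        = RTE_BE16(0x0123),
		.inner_type = RTE_BE16(0x0800), /* IPv4 follows the tag */
	};
	static const struct rte_flow_item_vlan vlan_mask = {
		.tci        = RTE_BE16(0xffff),
		.inner_type = RTE_BE16(0xffff),
	};
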
@@ -781,6 +983,9 @@ static int
 enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
 {
+       enum { FATE = 1, };
+       uint32_t overlap = 0;
+
        FLOW_TRACE();
 
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -792,6 +997,10 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
+
+                       if (overlap & FATE)
+                               return ENOTSUP;
+                       overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
@@ -801,6 +1010,8 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
                        break;
                }
        }
+       if (!(overlap & FATE))
+               return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
 }
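
The new overlap bookkeeping turns two formerly silent cases into explicit ENOTSUP errors: more than one fate action in a rule, and a rule with no fate action at all. Illustrative only (queue indices invented):

	/* Two fate actions in one rule: the second QUEUE now fails the
	 * 'overlap & FATE' check instead of silently overwriting rq_idx.
	 */
	static const struct rte_flow_action_queue q0 = { .index = 0 };
	static const struct rte_flow_action_queue q1 = { .index = 1 };
	static const struct rte_flow_action two_fates[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q0 },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q1 }, /* ENOTSUP */
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
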
@@ -818,6 +1029,9 @@ static int
 enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
 {
+       enum { FATE = 1, MARK = 2, };
+       uint32_t overlap = 0;
+
        FLOW_TRACE();
 
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -826,6 +1040,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
+
+                       if (overlap & FATE)
+                               return ENOTSUP;
+                       overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
@@ -836,6 +1054,9 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
                                (const struct rte_flow_action_mark *)
                                actions->conf;
 
+                       if (overlap & MARK)
+                               return ENOTSUP;
+                       overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
                         * in the range of allowed mark ids.
                         */
@@ -846,10 +1067,24 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
+                       if (overlap & MARK)
+                               return ENOTSUP;
+                       overlap |= MARK;
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
+               case RTE_FLOW_ACTION_TYPE_DROP: {
+                       if (overlap & FATE)
+                               return ENOTSUP;
+                       overlap |= FATE;
+                       enic_action->flags |= FILTER_ACTION_DROP_FLAG;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_COUNT: {
+                       enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
+                       break;
+               }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
@@ -857,6 +1092,8 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
                        break;
                }
        }
+       if (!(overlap & FATE))
+               return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
 }
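
Putting the patch's headline feature together, a sketch of an action list enic_copy_action_v2() accepts when the adapter advertises counter support (mark id and queue index invented; COUNT needs no conf here because the PMD allocates the counter itself):

	static const struct rte_flow_action_queue queue = { .index = 3 };
	static const struct rte_flow_action_mark mark = { .id = 42 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
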
@@ -878,11 +1115,6 @@ enic_match_action(const struct rte_flow_action *action,
 static const struct enic_filter_cap *
 enic_get_filter_cap(struct enic *enic)
 {
-       /* FIXME: only support advanced filters for now */
-       if ((enic->flow_filter_mode != FILTER_DPDK_1) &&
-          (enic->flow_filter_mode != FILTER_USNIC_IP))
-               return (const struct enic_filter_cap *)NULL;
-
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];
 
@@ -893,14 +1125,156 @@ enic_get_filter_cap(struct enic *enic)
 static const struct enic_action_cap *
 enic_get_action_cap(struct enic *enic)
 {
-       static const struct enic_action_cap *ea;
-
-       if (enic->filter_tags)
-               ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
+       const struct enic_action_cap *ea;
+       uint8_t actions;
+
+       actions = enic->filter_actions;
+       if (actions & FILTER_ACTION_COUNTER_FLAG)
+               ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
+       else if (actions & FILTER_ACTION_DROP_FLAG)
+               ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
+       else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
+               ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
 }
+
+/* Debug function to dump internal NIC action structure. */
+static void
+enic_dump_actions(const struct filter_action_v2 *ea)
+{
+       if (ea->type == FILTER_ACTION_RQ_STEERING) {
+               FLOW_LOG(INFO, "Action(V1), queue: %u", ea->rq_idx);
+       } else if (ea->type == FILTER_ACTION_V2) {
+               FLOW_LOG(INFO, "Actions(V2)");
+               if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
+                       FLOW_LOG(INFO, "\tqueue: %u",
+                              enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
+               if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
+                       FLOW_LOG(INFO, "\tfilter_id: %u", ea->filter_id);
+       }
+}
+
+/* Debug function to dump internal NIC filter structure. */
+static void
+enic_dump_filter(const struct filter_v2 *filt)
+{
+       const struct filter_generic_1 *gp;
+       int i, j, mbyte;
+       char buf[128], *bp;
+       char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
+       char l4csum[16], ipfrag[16];
+
+       switch (filt->type) {
+       case FILTER_IPV4_5TUPLE:
+               FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
+               break;
+       case FILTER_USNIC_IP:
+       case FILTER_DPDK_1:
+               /* FIXME: this should be a loop */
+               gp = &filt->u.generic_1;
+               FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
+                      gp->val_vlan, gp->mask_vlan);
+
+               if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
+                       sprintf(ip4, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_IPV4)
+                                ? "ip4(y)" : "ip4(n)");
+               else
+                       sprintf(ip4, "%s ", "ip4(x)");
+
+               if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
+                       sprintf(ip6, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_IPV6)
+                                ? "ip6(y)" : "ip6(n)");
+               else
+                       sprintf(ip6, "%s ", "ip6(x)");
+
+               if (gp->mask_flags & FILTER_GENERIC_1_UDP)
+                       sprintf(udp, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_UDP)
+                                ? "udp(y)" : "udp(n)");
+               else
+                       sprintf(udp, "%s ", "udp(x)");
+
+               if (gp->mask_flags & FILTER_GENERIC_1_TCP)
+                       sprintf(tcp, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_TCP)
+                                ? "tcp(y)" : "tcp(n)");
+               else
+                       sprintf(tcp, "%s ", "tcp(x)");
+
+               if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+                       sprintf(tcpudp, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+                                ? "tcpudp(y)" : "tcpudp(n)");
+               else
+                       sprintf(tcpudp, "%s ", "tcpudp(x)");
+
+               if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
+                       sprintf(ip4csum, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
+                                ? "ip4csum(y)" : "ip4csum(n)");
+               else
+                       sprintf(ip4csum, "%s ", "ip4csum(x)");
+
+               if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
+                       sprintf(l4csum, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
+                                ? "l4csum(y)" : "l4csum(n)");
+               else
+                       sprintf(l4csum, "%s ", "l4csum(x)");
+
+               if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
+                       sprintf(ipfrag, "%s ",
+                               (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
+                                ? "ipfrag(y)" : "ipfrag(n)");
+               else
+                       sprintf(ipfrag, "%s ", "ipfrag(x)");
+               FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
+                        tcp, tcpudp, ip4csum, l4csum, ipfrag);
+
+               for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
+                       mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
+                       while (mbyte && !gp->layer[i].mask[mbyte])
+                               mbyte--;
+                       if (!gp->layer[i].mask[mbyte])
+                               continue;
+
+                       bp = buf;
+                       for (j = 0; j <= mbyte; j++) {
+                               sprintf(bp, "%02x",
+                                       gp->layer[i].mask[j]);
+                               bp += 2;
+                       }
+                       *bp = '\0';
+                       FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
+                       bp = buf;
+                       for (j = 0; j <= mbyte; j++) {
+                               sprintf(bp, "%02x",
+                                       gp->layer[i].val[j]);
+                               bp += 2;
+                       }
+                       *bp = '\0';
+                       FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
+               }
+               break;
+       default:
+               FLOW_LOG(INFO, "FILTER UNKNOWN\n");
+               break;
+       }
+}
+
+/* Debug function to dump internal NIC flow structures. */
+static void
+enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
+{
+       enic_dump_filter(filt);
+       enic_dump_actions(ea);
+}
+
 /**
  * Internal flow parse/validate function.
  *
@@ -966,6 +1340,12 @@ enic_flow_parse(struct rte_eth_dev *dev,
                                           NULL,
                                           "egress is not supported");
                        return -rte_errno;
+               } else if (attrs->transfer) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                                          NULL,
+                                          "transfer is not supported");
+                       return -rte_errno;
                } else if (!attrs->ingress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
@@ -1034,8 +1414,10 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                   struct rte_flow_error *error)
 {
        struct rte_flow *flow;
-       int ret;
-       u16 entry;
+       int err;
+       uint16_t entry;
+       int ctr_idx;
+       int last_max_flow_ctr;
 
        FLOW_TRACE();
 
@@ -1046,20 +1428,64 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                return NULL;
        }
 
+       flow->counter_idx = -1;
+       last_max_flow_ctr = -1;
+       if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
+               if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
+                       rte_flow_error_set(error, ENOMEM,
+                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                          NULL, "cannot allocate counter");
+                       goto unwind_flow_alloc;
+               }
+               flow->counter_idx = ctr_idx;
+               enic_action->counter_index = ctr_idx;
+
+               /* If index is the largest, increase the counter DMA size */
+               if (ctr_idx > enic->max_flow_counter) {
+                       err = vnic_dev_counter_dma_cfg(enic->vdev,
+                                                VNIC_FLOW_COUNTER_UPDATE_MSECS,
+                                                ctr_idx);
+                       if (err) {
+                               rte_flow_error_set(error, -err,
+                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                          NULL, "counter DMA config failed");
+                               goto unwind_ctr_alloc;
+                       }
+                       last_max_flow_ctr = enic->max_flow_counter;
+                       enic->max_flow_counter = ctr_idx;
+               }
+       }
+
        /* entry[in] is the queue id, entry[out] is the filter Id for delete */
        entry = enic_action->rq_idx;
-       ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
+       err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                                  enic_action);
-       if (!ret) {
-               flow->enic_filter_id = entry;
-               flow->enic_filter = *enic_filter;
-       } else {
-               rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+       if (err) {
+               rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier error");
-               rte_free(flow);
-               return NULL;
+               goto unwind_ctr_dma_cfg;
        }
+
+       flow->enic_filter_id = entry;
+       flow->enic_filter = *enic_filter;
+
        return flow;
+
+/*
+ * Unwind on error, in reverse order. For example, if max_flow_counter
+ * was 2 and this flow's counter landed at index 4, the DMA window was
+ * grown to cover index 4; a classifier failure shrinks it back to 2,
+ * frees counter 4, then frees the flow.
+ */
+unwind_ctr_dma_cfg:
+       if (last_max_flow_ctr != -1) {
+               /* reduce counter DMA size */
+               vnic_dev_counter_dma_cfg(enic->vdev,
+                                        VNIC_FLOW_COUNTER_UPDATE_MSECS,
+                                        last_max_flow_ctr);
+               enic->max_flow_counter = last_max_flow_ctr;
+       }
+unwind_ctr_alloc:
+       if (flow->counter_idx != -1)
+               vnic_dev_counter_free(enic->vdev, ctr_idx);
+unwind_flow_alloc:
+       rte_free(flow);
+       return NULL;
 }
 
 /**
@@ -1074,18 +1500,29 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
  * @param error[out]
  */
 static int
-enic_flow_del_filter(struct enic *enic, u16 filter_id,
+enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
                   struct rte_flow_error *error)
 {
-       int ret;
+       u16 filter_id;
+       int err;
 
        FLOW_TRACE();
 
-       ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
-       if (!ret)
-               rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+       filter_id = flow->enic_filter_id;
+       err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
+       if (err) {
+               rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier failed");
-       return ret;
+               return -err;
+       }
+
+       if (flow->counter_idx != -1) {
+               if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
+                       dev_err(enic, "counter free failed, idx: %d\n",
+                               flow->counter_idx);
+               flow->counter_idx = -1;
+       }
+       return 0;
 }
 
 /*
@@ -1112,6 +1549,8 @@ enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
 
        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                               &enic_filter, &enic_action);
+       if (!ret)
+               enic_dump_flow(&enic_action, &enic_filter);
        return ret;
 }
 
@@ -1166,9 +1605,10 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
        FLOW_TRACE();
 
        rte_spinlock_lock(&enic->flows_lock);
-       enic_flow_del_filter(enic, flow->enic_filter_id, error);
+       enic_flow_del_filter(enic, flow, error);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
+       rte_free(flow);
        return 0;
 }
 
@@ -1190,13 +1630,77 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 
        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
-               enic_flow_del_filter(enic, flow->enic_filter_id, error);
+               enic_flow_del_filter(enic, flow, error);
                LIST_REMOVE(flow, next);
+               rte_free(flow);
        }
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
 }
 
+static int
+enic_flow_query_count(struct rte_eth_dev *dev,
+                     struct rte_flow *flow, void *data,
+                     struct rte_flow_error *error)
+{
+       struct enic *enic = pmd_priv(dev);
+       struct rte_flow_query_count *query;
+       uint64_t packets, bytes;
+
+       FLOW_TRACE();
+
+       if (flow->counter_idx == -1) {
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "flow does not have counter");
+       }
+       query = (struct rte_flow_query_count *)data;
+       if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
+                                   !!query->reset, &packets, &bytes)) {
+               return rte_flow_error_set
+                       (error, EINVAL,
+                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                        NULL,
+                        "cannot read counter");
+       }
+       query->hits_set = 1;
+       query->bytes_set = 1;
+       query->hits = packets;
+       query->bytes = bytes;
+       return 0;
+}
+
+static int
+enic_flow_query(struct rte_eth_dev *dev,
+               struct rte_flow *flow,
+               const struct rte_flow_action *actions,
+               void *data,
+               struct rte_flow_error *error)
+{
+       int ret = 0;
+
+       FLOW_TRACE();
+
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_VOID:
+                       break;
+               case RTE_FLOW_ACTION_TYPE_COUNT:
+                       ret = enic_flow_query_count(dev, flow, data, error);
+                       break;
+               default:
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 actions,
+                                                 "action not supported");
+               }
+               if (ret < 0)
+                       return ret;
+       }
+       return 0;
+}
+
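On the application side, the counter comes back through the generic query entry point, which routes to enic_flow_query() above. A hedged usage sketch (port_id and flow are assumed to exist, with the flow created using a COUNT action):

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_flow.h>

	static void
	print_flow_counter(uint16_t port_id, struct rte_flow *flow)
	{
		struct rte_flow_query_count counts = { .reset = 1 };
		const struct rte_flow_action count_action = {
			.type = RTE_FLOW_ACTION_TYPE_COUNT,
		};
		struct rte_flow_error err;

		if (rte_flow_query(port_id, flow, &count_action,
				   &counts, &err) == 0)
			printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
			       counts.hits, counts.bytes);
	}
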
 /**
  * Flow callback registration.
  *
@@ -1207,4 +1711,5 @@ const struct rte_flow_ops enic_flow_ops = {
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
+       .query = enic_flow_query,
 };