net: add rte prefix to ether defines
[dpdk.git] / drivers / net / cxgbe / cxgbe_flow.c
index dfb5fac..4f23468 100644 (file)
@@ -2,19 +2,17 @@
  * Copyright(c) 2018 Chelsio Communications.
  * All rights reserved.
  */
-#include "common.h"
+#include "base/common.h"
 #include "cxgbe_flow.h"
 
 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
 do { \
-       if (!((fs)->val.elem || (fs)->mask.elem)) { \
-               (fs)->val.elem = (__v); \
-               (fs)->mask.elem = (__m); \
-       } else { \
+       if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
-                                         NULL, "a filter can be specified" \
-                                         " only once"); \
-       } \
+                                         NULL, "Redefined match item with" \
+                                         " different values found"); \
+       (fs)->val.elem = (__v); \
+       (fs)->mask.elem = (__m); \
 } while (0)
 
 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
@@ -93,6 +91,10 @@ cxgbe_fill_filter_region(struct adapter *adap,
                ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
        if (tp->ethertype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+       if (tp->port_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+       if (tp->macmatch_shift >= 0)
+               ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
 
        if (ntuple_mask != hash_filter_mask)
                return;
@@ -100,6 +102,67 @@ cxgbe_fill_filter_region(struct adapter *adap,
        fs->cap = 1;    /* use hash region */
 }
 
+static int
+ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
+                    struct ch_filter_specification *fs,
+                    struct rte_flow_error *e)
+{
+       const struct rte_flow_item_eth *spec = item->spec;
+       const struct rte_flow_item_eth *umask = item->mask;
+       const struct rte_flow_item_eth *mask;
+
+       /* If user has not given any mask, then use chelsio supported mask. */
+       mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
+
+       /* We don't support SRC_MAC filtering. */
+       if (!rte_is_zero_ether_addr(&mask->src))
+               return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "src mac filtering not supported");
+
+       if (!rte_is_zero_ether_addr(&mask->dst)) {
+               const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
+               const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
+               struct rte_flow *flow = (struct rte_flow *)fs->private;
+               struct port_info *pi = (struct port_info *)
+                                       (flow->dev->data->dev_private);
+               int idx;
+
+               idx = cxgbe_mpstcam_alloc(pi, addr, m); /* reserve MPS TCAM entry for dst mac */
+               if (idx <= 0)
+                       return rte_flow_error_set(e, idx,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 NULL, "unable to allocate mac"
+                                                 " entry in h/w");
+               CXGBE_FILL_FS(idx, 0x1ff, macidx); /* 0x1ff: match all 9 index bits */
+       }
+
+       CXGBE_FILL_FS(be16_to_cpu(spec->type),
+                     be16_to_cpu(mask->type), ethtype);
+       return 0;
+}
+
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+                     struct ch_filter_specification *fs,
+                     struct rte_flow_error *e)
+{
+       const struct rte_flow_item_phy_port *val = item->spec;
+       const struct rte_flow_item_phy_port *umask = item->mask;
+       const struct rte_flow_item_phy_port *mask;
+
+       mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask; /* default mask if user gave none */
+
+       if (val->index > 0x7) /* only port indices 0-7 are accepted */
+               return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "port index up to 0x7 is supported");
+
+       CXGBE_FILL_FS(val->index, mask->index, iport);
+
+       return 0;
+}
+
 static int
 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
@@ -170,7 +233,7 @@ ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
                                          item, "ttl/tos are not supported");
 
        fs->type = FILTER_TYPE_IPV4;
-       CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+       CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv4, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv4 wild card */
 
@@ -199,7 +262,7 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
                                          "tc/flow/hop are not supported");
 
        fs->type = FILTER_TYPE_IPV6;
-       CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
+       CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv6, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv6 wild card */
 
@@ -303,15 +366,222 @@ static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
        return 0;
 }
 
+static int
+cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
+{
+       const struct rte_flow_item *i;
+       int j, index = -ENOENT; /* -ENOENT when no item of @type exists */
+
+       for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
+               if (i->type == type) {
+                       index = j; /* position of first matching item */
+                       break;
+               }
+       }
+
+       return index;
+}
+
+static int
+ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
+{
+       /* nmode:
+        * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
+        * BIT_2 = [src_port], BIT_3 = [dst_port]
+        *
+        * Only below cases are supported as per our spec.
+        */
+       switch (nmode) {
+       case 0:  /* 0000b */
+               fs->nat_mode = NAT_MODE_NONE;
+               break;
+       case 2:  /* 0010b */
+               fs->nat_mode = NAT_MODE_DIP;
+               break;
+       case 5:  /* 0101b */
+               fs->nat_mode = NAT_MODE_SIP_SP;
+               break;
+       case 7:  /* 0111b */
+               fs->nat_mode = NAT_MODE_DIP_SIP_SP;
+               break;
+       case 10: /* 1010b */
+               fs->nat_mode = NAT_MODE_DIP_DP;
+               break;
+       case 11: /* 1011b */
+               fs->nat_mode = NAT_MODE_DIP_DP_SIP;
+               break;
+       case 14: /* 1110b */
+               fs->nat_mode = NAT_MODE_DIP_DP_SP;
+               break;
+       case 15: /* 1111b */
+               fs->nat_mode = NAT_MODE_ALL;
+               break;
+       default:
+               return -EINVAL; /* any other rewrite-bit combination is rejected */
+       }
+
+       return 0;
+}
+
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+                         const struct rte_flow_item items[],
+                         uint8_t *nmode,
+                         struct ch_filter_specification *fs,
+                         struct rte_flow_error *e)
+{
+       const struct rte_flow_action_of_set_vlan_vid *vlanid;
+       const struct rte_flow_action_of_push_vlan *pushvlan;
+       const struct rte_flow_action_set_ipv4 *ipv4;
+       const struct rte_flow_action_set_ipv6 *ipv6;
+       const struct rte_flow_action_set_tp *tp_port;
+       const struct rte_flow_action_phy_port *port;
+       int item_index;
+
+       switch (a->type) {
+       case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+               vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
+                         a->conf;
+               fs->newvlan = VLAN_REWRITE;
+               fs->vlan = vlanid->vlan_vid;
+               break;
+       case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+               pushvlan = (const struct rte_flow_action_of_push_vlan *)
+                           a->conf;
+               if (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN) /* only 802.1Q (0x8100) */
+                       return rte_flow_error_set(e, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                                 "only ethertype 0x8100 "
+                                                 "supported for push vlan.");
+               fs->newvlan = VLAN_INSERT;
+               break;
+       case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+               fs->newvlan = VLAN_REMOVE;
+               break;
+       case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+               port = (const struct rte_flow_action_phy_port *)a->conf;
+               fs->eport = port->index; /* egress port for switched packet */
+               break;
+       case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+               item_index = cxgbe_get_flow_item_index(items,
+                                                      RTE_FLOW_ITEM_TYPE_IPV4);
+               if (item_index < 0) /* rewrite only valid with a matching IPv4 item */
+                       return rte_flow_error_set(e, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                                 "No RTE_FLOW_ITEM_TYPE_IPV4 "
+                                                 "found.");
+
+               ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+               memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+               *nmode |= 1 << 0; /* BIT_0: src_ip rewrite */
+               break;
+       case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+               item_index = cxgbe_get_flow_item_index(items,
+                                                      RTE_FLOW_ITEM_TYPE_IPV4);
+               if (item_index < 0)
+                       return rte_flow_error_set(e, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                                 "No RTE_FLOW_ITEM_TYPE_IPV4 "
+                                                 "found.");
+
+               ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+               memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+               *nmode |= 1 << 1; /* BIT_1: dst_ip rewrite */
+               break;
+       case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+               item_index = cxgbe_get_flow_item_index(items,
+                                                      RTE_FLOW_ITEM_TYPE_IPV6);
+               if (item_index < 0)
+                       return rte_flow_error_set(e, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                                 "No RTE_FLOW_ITEM_TYPE_IPV6 "
+                                                 "found.");
+
+               ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+               memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+               *nmode |= 1 << 0; /* BIT_0: src_ip rewrite */
+               break;
+       case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+               item_index = cxgbe_get_flow_item_index(items,
+                                                      RTE_FLOW_ITEM_TYPE_IPV6);
+               if (item_index < 0)
+                       return rte_flow_error_set(e, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                                 "No RTE_FLOW_ITEM_TYPE_IPV6 "
+                                                 "found.");
+
+               ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+               memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+               *nmode |= 1 << 1; /* BIT_1: dst_ip rewrite */
+               break;
+       case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+               item_index = cxgbe_get_flow_item_index(items,
+                                                      RTE_FLOW_ITEM_TYPE_TCP);
+               if (item_index < 0) { /* fall back to UDP before rejecting */
+                       item_index =
+                               cxgbe_get_flow_item_index(items,
+                                               RTE_FLOW_ITEM_TYPE_UDP);
+                       if (item_index < 0)
+                               return rte_flow_error_set(e, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                               "No RTE_FLOW_ITEM_TYPE_TCP or "
+                                               "RTE_FLOW_ITEM_TYPE_UDP found");
+               }
+
+               tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+               fs->nat_fport = be16_to_cpu(tp_port->port);
+               *nmode |= 1 << 2; /* BIT_2: src_port rewrite */
+               break;
+       case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+               item_index = cxgbe_get_flow_item_index(items,
+                                                      RTE_FLOW_ITEM_TYPE_TCP);
+               if (item_index < 0) { /* fall back to UDP before rejecting */
+                       item_index =
+                               cxgbe_get_flow_item_index(items,
+                                               RTE_FLOW_ITEM_TYPE_UDP);
+                       if (item_index < 0)
+                               return rte_flow_error_set(e, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                               "No RTE_FLOW_ITEM_TYPE_TCP or "
+                                               "RTE_FLOW_ITEM_TYPE_UDP found");
+               }
+
+               tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+               fs->nat_lport = be16_to_cpu(tp_port->port);
+               *nmode |= 1 << 3; /* BIT_3: dst_port rewrite */
+               break;
+       case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+               item_index = cxgbe_get_flow_item_index(items,
+                                                      RTE_FLOW_ITEM_TYPE_ETH);
+               if (item_index < 0)
+                       return rte_flow_error_set(e, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                                 "No RTE_FLOW_ITEM_TYPE_ETH "
+                                                 "found");
+               fs->swapmac = 1;
+               break;
+       default:
+               /* We are not supposed to come here */
+               return rte_flow_error_set(e, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                         "Action not supported");
+       }
+
+       return 0;
+}
+
 static int
 cxgbe_rtef_parse_actions(struct rte_flow *flow,
+                        const struct rte_flow_item items[],
                         const struct rte_flow_action action[],
                         struct rte_flow_error *e)
 {
        struct ch_filter_specification *fs = &flow->fs;
+       uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
        const struct rte_flow_action_queue *q;
        const struct rte_flow_action *a;
        char abit = 0;
+       int ret;
 
        for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
                switch (a->type) {
@@ -345,6 +615,41 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        fs->hitcnts = 1;
                        break;
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+               case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+               case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+               case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+               case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+                       nat_ipv4++;
+                       goto action_switch;
+               case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+               case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+                       nat_ipv6++;
+                       goto action_switch;
+               case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+               case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+action_switch:
+                       /* We allow multiple switch actions, but switch is
+                        * not compatible with either queue or drop
+                        */
+                       if (abit++ && fs->action != FILTER_SWITCH)
+                               return rte_flow_error_set(e, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                               "overlapping action specified");
+                       if (nat_ipv4 && nat_ipv6)
+                               return rte_flow_error_set(e, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                       "Can't have one address ipv4 and the"
+                                       " other ipv6");
+
+                       ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
+                                                       e);
+                       if (ret)
+                               return ret;
+                       fs->action = FILTER_SWITCH;
+                       break;
                default:
                        /* Not supported action : return error */
                        return rte_flow_error_set(e, ENOTSUP,
@@ -353,10 +658,30 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
                }
        }
 
+       if (ch_rte_parse_nat(nmode, fs))
+               return rte_flow_error_set(e, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                         "invalid settings for switch action");
        return 0;
 }
 
-struct chrte_fparse parseitem[] = {
+static struct chrte_fparse parseitem[] = {
+       [RTE_FLOW_ITEM_TYPE_ETH] = {
+               .fptr  = ch_rte_parsetype_eth,
+               .dmask = &(const struct rte_flow_item_eth){
+                       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+                       .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+                       .type = 0xffff,
+               }
+       },
+
+       [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+               .fptr = ch_rte_parsetype_port,
+               .dmask = &(const struct rte_flow_item_phy_port){
+                       .index = 0x7,
+               }
+       },
+
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .fptr  = ch_rte_parsetype_ipv4,
                .dmask = &rte_flow_item_ipv4_mask,
@@ -388,10 +713,10 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
        char repeat[ARRAY_SIZE(parseitem)] = {0};
 
        for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
-               struct chrte_fparse *idx = &flow->item_parser[i->type];
+               struct chrte_fparse *idx;
                int ret;
 
-               if (i->type > ARRAY_SIZE(parseitem))
+               if (i->type >= ARRAY_SIZE(parseitem))
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  i, "Item not supported");
@@ -407,11 +732,16 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
                                                "parse items cannot be repeated (except void)");
                        repeat[i->type] = 1;
 
+                       /* No spec found for this pattern item. Skip it */
+                       if (!i->spec)
+                               break;
+
                        /* validate the item */
                        ret = cxgbe_validate_item(i, e);
                        if (ret)
                                return ret;
 
+                       idx = &flow->item_parser[i->type];
                        if (!idx || !idx->fptr) {
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
@@ -437,7 +767,6 @@ cxgbe_flow_parse(struct rte_flow *flow,
                 struct rte_flow_error *e)
 {
        int ret;
-
        /* parse user request into ch_filter_specification */
        ret = cxgbe_rtef_parse_attr(flow, attr, e);
        if (ret)
@@ -445,13 +774,14 @@ cxgbe_flow_parse(struct rte_flow *flow,
        ret = cxgbe_rtef_parse_items(flow, item, e);
        if (ret)
                return ret;
-       return cxgbe_rtef_parse_actions(flow, action, e);
+       return cxgbe_rtef_parse_actions(flow, item, action, e);
 }
 
 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(dev);
+       struct tid_info *t = &adap->tids;
        struct filter_ctx ctx;
        unsigned int fidx;
        int err;
@@ -471,7 +801,7 @@ static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
 
        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
-                                       CXGBE_FLOW_POLL_US,
+                                       CXGBE_FLOW_POLL_MS,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
@@ -484,8 +814,13 @@ static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
                return ctx.result;
        }
 
-       flow->fidx = fidx;
-       flow->f = &adap->tids.ftid_tab[fidx];
+       if (fs->cap) { /* to destroy the filter */
+               flow->fidx = ctx.tid;
+               flow->f = lookup_tid(t, ctx.tid);
+       } else {
+               flow->fidx = fidx;
+               flow->f = &adap->tids.ftid_tab[fidx];
+       }
 
        return 0;
 }
@@ -510,6 +845,7 @@ cxgbe_flow_create(struct rte_eth_dev *dev,
 
        flow->item_parser = parseitem;
        flow->dev = dev;
+       flow->fs.private = (void *)flow;
 
        if (cxgbe_flow_parse(flow, attr, item, action, e)) {
                t4_os_free(flow);
@@ -551,7 +887,7 @@ static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 
        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
-                                       CXGBE_FLOW_POLL_US,
+                                       CXGBE_FLOW_POLL_MS,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
@@ -564,6 +900,17 @@ static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                return ctx.result;
        }
 
+       fs = &flow->fs;
+       if (fs->mask.macidx) {
+               struct port_info *pi = (struct port_info *)
+                                       (dev->data->dev_private);
+               int ret;
+
+               ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
+               if (!ret)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -585,13 +932,14 @@ static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
                              u64 *byte_count)
 {
        struct adapter *adap = ethdev2adap(flow->dev);
+       struct ch_filter_specification fs = flow->f->fs;
        unsigned int fidx = flow->fidx;
        int ret = 0;
 
-       ret = cxgbe_get_filter_count(adap, fidx, count, 0);
+       ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
        if (ret)
                return ret;
-       return cxgbe_get_filter_count(adap, fidx, byte_count, 1);
+       return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
 }
 
 static int
@@ -599,6 +947,7 @@ cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                 const struct rte_flow_action *action, void *data,
                 struct rte_flow_error *e)
 {
+       struct adapter *adap = ethdev2adap(flow->dev);
        struct ch_filter_specification fs;
        struct rte_flow_query_count *c;
        struct filter_entry *f;
@@ -637,6 +986,8 @@ cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
        /* Query was successful */
        c->bytes_set = 1;
        c->hits_set = 1;
+       if (c->reset)
+               cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
 
        return 0; /* success / partial_success */
 }
@@ -723,6 +1074,19 @@ static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
                                goto out;
                }
        }
+
+       if (is_hashfilter(adap) && adap->tids.tid_tab) {
+               struct filter_entry *f;
+
+               for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
+                       f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+                       ret = cxgbe_check_n_destroy(f, dev, e);
+                       if (ret < 0)
+                               goto out;
+               }
+       }
+
 out:
        return ret >= 0 ? 0 : ret;
 }