X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcxgbe%2Fcxgbe_flow.c;h=4deaff8f2f0fb830931b1187399ed627f55ddd45;hb=4dc73ff7042a27602a738be4450a8bae39388c4f;hp=8d25dc26e8677b6b4dc6217e7c72678714e00524;hpb=da23bc9d33f4e76f4eeaf525f2b8b39ad0e23cd7;p=dpdk.git

diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 8d25dc26e8..4deaff8f2f 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -7,14 +7,12 @@
 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
 do { \
-	if (!((fs)->val.elem || (fs)->mask.elem)) { \
-		(fs)->val.elem = (__v); \
-		(fs)->mask.elem = (__m); \
-	} else { \
+	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
-					  NULL, "a filter can be specified" \
-					  " only once"); \
-	} \
+					  NULL, "Redefined match item with" \
+					  " different values found"); \
+	(fs)->val.elem = (__v); \
+	(fs)->mask.elem = (__m); \
 } while (0)
 
 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
@@ -48,6 +46,123 @@ cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
 	return 0;
 }
 
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+			 struct ch_filter_specification *fs)
+{
+	struct tp_params *tp = &adap->params.tp;
+	u64 hash_filter_mask = tp->hash_filter_mask;
+	u64 ntuple_mask = 0;
+
+	fs->cap = 0;
+
+	if (!is_hashfilter(adap))
+		return;
+
+	if (fs->type) {
+		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff};
+		uint8_t bitoff[16] = {0};
+
+		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+		    memcmp(fs->mask.fip, biton, sizeof(biton)))
+			return;
+	} else {
+		uint32_t biton = 0xffffffff;
+		uint32_t bitoff = 0x0U;
+
+		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
+			return;
+	}
+
+	if (!fs->val.lport || fs->mask.lport != 0xffff)
+		return;
+	if (!fs->val.fport || fs->mask.fport != 0xffff)
+		return;
+
+	if (tp->protocol_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+	if (tp->ethertype_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+	if (tp->port_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+	if (tp->macmatch_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+
+	if (ntuple_mask != hash_filter_mask)
+		return;
+
+	fs->cap = 1;	/* use hash region */
+}
+
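+/* Parse an ETH match item into the Chelsio filter specification.
+ * A destination MAC match consumes an MPS TCAM entry; source MAC
+ * matching is rejected as unsupported.
+ */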
+static int
+ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
+		     struct ch_filter_specification *fs,
+		     struct rte_flow_error *e)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *umask = item->mask;
+	const struct rte_flow_item_eth *mask;
+
+	/* If user has not given any mask, then use Chelsio supported mask. */
+	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
+
+	/* we don't support SRC_MAC filtering */
+	if (!is_zero_ether_addr(&mask->src))
+		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "src mac filtering not supported");
+
+	if (!is_zero_ether_addr(&mask->dst)) {
+		const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
+		const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
+		struct rte_flow *flow = (struct rte_flow *)fs->private;
+		struct port_info *pi = (struct port_info *)
+				       (flow->dev->data->dev_private);
+		int idx;
+
+		idx = cxgbe_mpstcam_alloc(pi, addr, m);
+		if (idx <= 0)
+			return rte_flow_error_set(e, idx,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  NULL, "unable to allocate mac"
+						  " entry in h/w");
+		CXGBE_FILL_FS(idx, 0x1ff, macidx);
+	}
+
+	CXGBE_FILL_FS(be16_to_cpu(spec->type),
+		      be16_to_cpu(mask->type), ethtype);
+	return 0;
+}
+
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+		      struct ch_filter_specification *fs,
+		      struct rte_flow_error *e)
+{
+	const struct rte_flow_item_phy_port *val = item->spec;
+	const struct rte_flow_item_phy_port *umask = item->mask;
+	const struct rte_flow_item_phy_port *mask;
+
+	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+	if (val->index > 0x7)
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "port index up to 0x7 is supported");
+
+	CXGBE_FILL_FS(val->index, mask->index, iport);
+
+	return 0;
+}
+
 static int
 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
 		     struct ch_filter_specification *fs,
@@ -222,6 +337,8 @@ cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
 static int
 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
 {
+	if (flow->fs.cap)
+		return 0; /* Hash filters */
 	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
 		     cxgbe_validate_fidxonadd(&flow->fs,
 					      ethdev2adap(flow->dev), fidx);
@@ -249,15 +366,222 @@ static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
 	return 0;
 }
 
+static int
+cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
+{
+	const struct rte_flow_item *i;
+	int j, index = -ENOENT;
+
+	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
+		if (i->type == type) {
+			index = j;
+			break;
+		}
+	}
+
+	return index;
+}
+
+static int
+ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
+{
+	/* nmode:
+	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
+	 * BIT_2 = [src_port], BIT_3 = [dst_port]
+	 *
+	 * Only the combinations below are supported as per our spec.
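+	 * Any other combination, e.g. 1000b (destination port rewrite
+	 * alone), has no matching NAT_MODE_* value and is rejected.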
+	 */
+	switch (nmode) {
+	case 0:  /* 0000b */
+		fs->nat_mode = NAT_MODE_NONE;
+		break;
+	case 2:  /* 0010b */
+		fs->nat_mode = NAT_MODE_DIP;
+		break;
+	case 5:  /* 0101b */
+		fs->nat_mode = NAT_MODE_SIP_SP;
+		break;
+	case 7:  /* 0111b */
+		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
+		break;
+	case 10: /* 1010b */
+		fs->nat_mode = NAT_MODE_DIP_DP;
+		break;
+	case 11: /* 1011b */
+		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
+		break;
+	case 14: /* 1110b */
+		fs->nat_mode = NAT_MODE_DIP_DP_SP;
+		break;
+	case 15: /* 1111b */
+		fs->nat_mode = NAT_MODE_ALL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+			  const struct rte_flow_item items[],
+			  uint8_t *nmode,
+			  struct ch_filter_specification *fs,
+			  struct rte_flow_error *e)
+{
+	const struct rte_flow_action_of_set_vlan_vid *vlanid;
+	const struct rte_flow_action_of_push_vlan *pushvlan;
+	const struct rte_flow_action_set_ipv4 *ipv4;
+	const struct rte_flow_action_set_ipv6 *ipv6;
+	const struct rte_flow_action_set_tp *tp_port;
+	const struct rte_flow_action_phy_port *port;
+	int item_index;
+
+	switch (a->type) {
+	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
+			 a->conf;
+		fs->newvlan = VLAN_REWRITE;
+		fs->vlan = vlanid->vlan_vid;
+		break;
+	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+		pushvlan = (const struct rte_flow_action_of_push_vlan *)
+			   a->conf;
+		if (pushvlan->ethertype != ETHER_TYPE_VLAN)
+			return rte_flow_error_set(e, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION, a,
+						  "only ethertype 0x8100 "
+						  "supported for push vlan.");
+		fs->newvlan = VLAN_INSERT;
+		break;
+	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+		fs->newvlan = VLAN_REMOVE;
+		break;
+	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+		port = (const struct rte_flow_action_phy_port *)a->conf;
+		fs->eport = port->index;
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_IPV4);
+		if (item_index < 0)
+			return rte_flow_error_set(e, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION, a,
+						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
+						  "found.");
+
+		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+		*nmode |= 1 << 0;
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_IPV4);
+		if (item_index < 0)
+			return rte_flow_error_set(e, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION, a,
+						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
+						  "found.");
+
+		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+		*nmode |= 1 << 1;
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_IPV6);
+		if (item_index < 0)
+			return rte_flow_error_set(e, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION, a,
+						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
+						  "found.");
+
+		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+		*nmode |= 1 << 0;
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_IPV6);
+		if (item_index < 0)
+			return rte_flow_error_set(e, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION, a,
+						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
+						  "found.");
+
+		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+		*nmode |= 1 << 1;
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
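+		/* Transport port rewrite is only meaningful when the rule
+		 * also matches TCP or UDP, so require one of those items.
+		 */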
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_TCP);
+		if (item_index < 0) {
+			item_index =
+				cxgbe_get_flow_item_index(items,
+						RTE_FLOW_ITEM_TYPE_UDP);
+			if (item_index < 0)
+				return rte_flow_error_set(e, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION, a,
+						"No RTE_FLOW_ITEM_TYPE_TCP or "
+						"RTE_FLOW_ITEM_TYPE_UDP found");
+		}
+
+		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+		fs->nat_fport = be16_to_cpu(tp_port->port);
+		*nmode |= 1 << 2;
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_TCP);
+		if (item_index < 0) {
+			item_index =
+				cxgbe_get_flow_item_index(items,
+						RTE_FLOW_ITEM_TYPE_UDP);
+			if (item_index < 0)
+				return rte_flow_error_set(e, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION, a,
+						"No RTE_FLOW_ITEM_TYPE_TCP or "
+						"RTE_FLOW_ITEM_TYPE_UDP found");
+		}
+
+		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+		fs->nat_lport = be16_to_cpu(tp_port->port);
+		*nmode |= 1 << 3;
+		break;
+	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_ETH);
+		if (item_index < 0)
+			return rte_flow_error_set(e, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION, a,
+						  "No RTE_FLOW_ITEM_TYPE_ETH "
+						  "found");
+		fs->swapmac = 1;
+		break;
+	default:
+		/* We are not supposed to come here */
+		return rte_flow_error_set(e, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, a,
+					  "Action not supported");
+	}
+
+	return 0;
+}
+
 static int
 cxgbe_rtef_parse_actions(struct rte_flow *flow,
+			 const struct rte_flow_item items[],
 			 const struct rte_flow_action action[],
 			 struct rte_flow_error *e)
 {
 	struct ch_filter_specification *fs = &flow->fs;
+	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
 	const struct rte_flow_action_queue *q;
 	const struct rte_flow_action *a;
 	char abit = 0;
+	int ret;
 
 	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
 		switch (a->type) {
@@ -291,6 +615,41 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 			fs->hitcnts = 1;
 			break;
+		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+			nat_ipv4++;
+			goto action_switch;
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+			nat_ipv6++;
+			goto action_switch;
+		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+action_switch:
+			/* We allow multiple switch actions, but switch is
+			 * not compatible with either queue or drop
+			 */
+			if (abit++ && fs->action != FILTER_SWITCH)
+				return rte_flow_error_set(e, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION, a,
+						"overlapping action specified");
+			if (nat_ipv4 && nat_ipv6)
+				return rte_flow_error_set(e, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, a,
+					"Can't have one address ipv4 and the"
+					" other ipv6");
+
+			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
+							e);
+			if (ret)
+				return ret;
+			fs->action = FILTER_SWITCH;
+			break;
 		default:
 			/* Not supported action : return error */
 			return rte_flow_error_set(e, ENOTSUP,
@@ -299,10 +658,30 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
 		}
 	}
 
+	if (ch_rte_parse_nat(nmode, fs))
+		return rte_flow_error_set(e, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, a,
+					  "invalid settings for switch action");
 	return 0;
 }
 
-struct chrte_fparse parseitem[] = {
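+/* Per-item parse handlers; each dmask is the default mask applied when
+ * a pattern item does not supply one.
+ */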
+static struct chrte_fparse parseitem[] = {
+	[RTE_FLOW_ITEM_TYPE_ETH] = {
+		.fptr = ch_rte_parsetype_eth,
+		.dmask = &(const struct rte_flow_item_eth){
+			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+			.type = 0xffff,
+		}
+	},
+
+	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+		.fptr = ch_rte_parsetype_port,
+		.dmask = &(const struct rte_flow_item_phy_port){
+			.index = 0x7,
+		}
+	},
+
 	[RTE_FLOW_ITEM_TYPE_IPV4] = {
 		.fptr = ch_rte_parsetype_ipv4,
 		.dmask = &rte_flow_item_ipv4_mask,
@@ -329,14 +708,15 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
 		       const struct rte_flow_item items[],
 		       struct rte_flow_error *e)
 {
+	struct adapter *adap = ethdev2adap(flow->dev);
 	const struct rte_flow_item *i;
 	char repeat[ARRAY_SIZE(parseitem)] = {0};
 
 	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
-		struct chrte_fparse *idx = &flow->item_parser[i->type];
+		struct chrte_fparse *idx;
 		int ret;
 
-		if (i->type > ARRAY_SIZE(parseitem))
+		if (i->type >= ARRAY_SIZE(parseitem))
 			return rte_flow_error_set(e, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
 						  i, "Item not supported");
@@ -357,6 +737,7 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
 		if (ret)
 			return ret;
 
+		idx = &flow->item_parser[i->type];
 		if (!idx || !idx->fptr) {
 			return rte_flow_error_set(e, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM, i,
@@ -369,6 +750,8 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
 		}
 	}
 
+	cxgbe_fill_filter_region(adap, &flow->fs);
+
 	return 0;
 }
 
@@ -380,7 +763,6 @@ cxgbe_flow_parse(struct rte_flow *flow,
 		 struct rte_flow_error *e)
 {
 	int ret;
-
 	/* parse user request into ch_filter_specification */
 	ret = cxgbe_rtef_parse_attr(flow, attr, e);
 	if (ret)
@@ -388,13 +770,14 @@ cxgbe_flow_parse(struct rte_flow *flow,
 	ret = cxgbe_rtef_parse_items(flow, item, e);
 	if (ret)
 		return ret;
-	return cxgbe_rtef_parse_actions(flow, action, e);
+	return cxgbe_rtef_parse_actions(flow, item, action, e);
 }
 
 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct ch_filter_specification *fs = &flow->fs;
 	struct adapter *adap = ethdev2adap(dev);
+	struct tid_info *t = &adap->tids;
 	struct filter_ctx ctx;
 	unsigned int fidx;
 	int err;
@@ -414,7 +797,7 @@ static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
 
 	/* Poll the FW for reply */
 	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
-					CXGBE_FLOW_POLL_US,
+					CXGBE_FLOW_POLL_MS,
 					CXGBE_FLOW_POLL_CNT,
 					&ctx.completion);
 	if (err) {
@@ -427,8 +810,13 @@ static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
 		return ctx.result;
 	}
 
-	flow->fidx = fidx;
-	flow->f = &adap->tids.ftid_tab[fidx];
+	if (fs->cap) { /* to destroy the filter */
+		flow->fidx = ctx.tid;
+		flow->f = lookup_tid(t, ctx.tid);
+	} else {
+		flow->fidx = fidx;
+		flow->f = &adap->tids.ftid_tab[fidx];
+	}
 
 	return 0;
 }
@@ -453,6 +841,7 @@ cxgbe_flow_create(struct rte_eth_dev *dev,
 
 	flow->item_parser = parseitem;
 	flow->dev = dev;
+	flow->fs.private = (void *)flow;
 
 	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
 		t4_os_free(flow);
@@ -468,6 +857,8 @@ cxgbe_flow_create(struct rte_eth_dev *dev,
 		return NULL;
 	}
 
+	flow->f->private = flow; /* Will be used during flush */
+
 	return flow;
 }
 
@@ -492,7 +883,7 @@ static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 
 	/* Poll the FW for reply */
 	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
-					CXGBE_FLOW_POLL_US,
+					CXGBE_FLOW_POLL_MS,
 					CXGBE_FLOW_POLL_CNT,
 					&ctx.completion);
 	if (err) {
@@ -505,6 +896,17 @@ static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		return ctx.result;
 	}
 
+	fs = &flow->fs;
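+	/* A destination MAC match holds an MPS TCAM entry (allocated in
+	 * ch_rte_parsetype_eth); release it along with the filter.
+	 */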
+	if (fs->mask.macidx) {
+		struct port_info *pi = (struct port_info *)
+				       (dev->data->dev_private);
+		int ret;
+
+		ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
+		if (!ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -522,6 +924,67 @@ cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 	return 0;
 }
 
+static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
+			      u64 *byte_count)
+{
+	struct adapter *adap = ethdev2adap(flow->dev);
+	struct ch_filter_specification fs = flow->f->fs;
+	unsigned int fidx = flow->fidx;
+	int ret = 0;
+
+	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
+	if (ret)
+		return ret;
+	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
+}
+
+static int
+cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+		 const struct rte_flow_action *action, void *data,
+		 struct rte_flow_error *e)
+{
+	struct ch_filter_specification fs;
+	struct rte_flow_query_count *c;
+	struct filter_entry *f;
+	int ret;
+
+	RTE_SET_USED(dev);
+
+	f = flow->f;
+	fs = f->fs;
+
+	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
+		return rte_flow_error_set(e, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "only count supported for query");
+
+	/*
+	 * This is a valid operation, since we are allowed to do Chelsio
+	 * specific operations on the rte side of our code but not vice versa.
+	 *
+	 * So, fs can be queried/modified here BUT rte_flow_query_count
+	 * cannot be worked on by the lower layer since we want to maintain
+	 * it as rte_flow agnostic.
+	 */
+	if (!fs.hitcnts)
+		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					  &fs, "filter hit counters were not"
+					  " enabled during filter creation");
+
+	c = (struct rte_flow_query_count *)data;
+	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
+	if (ret)
+		return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+					  f, "cxgbe pmd failed to"
+					  " perform query");
+
+	/* Query was successful */
+	c->bytes_set = 1;
+	c->hits_set = 1;
+
+	return 0; /* success / partial_success */
+}
+
 static int
 cxgbe_flow_validate(struct rte_eth_dev *dev,
 		    const struct rte_flow_attr *attr,
@@ -572,12 +1035,61 @@ cxgbe_flow_validate(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/*
+ * @ret : == 0 filter destroyed successfully
+ *        < 0 error destroying filter
+ *        == 1 filter not active / not found
+ */
+static int
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
+		      struct rte_flow_error *e)
+{
+	if (f && (f->valid || f->pending) &&
+	    f->dev == dev && /* Only if user has asked for this port */
+	    f->private) /* We (rte_flow) created this filter */
+		return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
+					  e);
+	return 1;
+}
+
+static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
+{
+	struct adapter *adap = ethdev2adap(dev);
+	unsigned int i;
+	int ret = 0;
+
+	if (adap->tids.ftid_tab) {
+		struct filter_entry *f = &adap->tids.ftid_tab[0];
+
+		for (i = 0; i < adap->tids.nftids; i++, f++) {
+			ret = cxgbe_check_n_destroy(f, dev, e);
+			if (ret < 0)
+				goto out;
+		}
+	}
+
+	if (is_hashfilter(adap) && adap->tids.tid_tab) {
+		struct filter_entry *f;
+
+		for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
+			f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+			ret = cxgbe_check_n_destroy(f, dev, e);
+			if (ret < 0)
+				goto out;
+		}
+	}
+
+out:
+	return ret >= 0 ? 0 : ret;
+}
+
 static const struct rte_flow_ops cxgbe_flow_ops = {
 	.validate	= cxgbe_flow_validate,
 	.create		= cxgbe_flow_create,
 	.destroy	= cxgbe_flow_destroy,
-	.flush		= NULL,
-	.query		= NULL,
+	.flush		= cxgbe_flow_flush,
+	.query		= cxgbe_flow_query,
 	.isolate	= NULL,
 };
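With .flush and .query now wired up, an application can read the hit and byte
counters of any rule created with a COUNT action. Below is a minimal sketch of
such a query from the application side; the function name and the port_id/flow
handle are hypothetical and assumed to come from the caller's earlier
rte_flow_create() setup.

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_flow.h>

	/* Query hit/byte counters for a flow created with a COUNT action.
	 * On cxgbe ports this call is serviced by cxgbe_flow_query() above.
	 */
	static int
	query_flow_counters(uint16_t port_id, struct rte_flow *flow)
	{
		struct rte_flow_query_count cnt = { .reset = 0 };
		const struct rte_flow_action action[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error err;
		int ret;

		ret = rte_flow_query(port_id, flow, &action[0], &cnt, &err);
		if (ret)
			return ret; /* e.g. hit counters were not enabled */

		if (cnt.hits_set && cnt.bytes_set)
			printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
			       cnt.hits, cnt.bytes);
		return 0;
	}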