/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "base/common.h"
#include "cxgbe_flow.h"

#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
        if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
                                          NULL, "Redefined match item with" \
                                          " different values found"); \
        (fs)->val.elem = (__v); \
        (fs)->mask.elem = (__m); \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
        memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
        memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
        __CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
        __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

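/*
 * Usage sketch (illustrative, not compiled): within a parser that has
 * 'fs' and 'e' in scope, CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto) records
 * val.proto = IPPROTO_TCP with an all-ones mask. A later
 * CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto) against the same spec would
 * trip the conflict check above and fail with EINVAL, since 'proto' was
 * already being matched against a different value.
 */
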
static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
        /* The rte_flow spec does not allow 'mask' or 'last' without 'spec'. */
        if (!i->spec && (i->mask || i->last))
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last or mask given without spec");
        /*
         * We don't support range matching via 'last'.
         * Although we could accept 'last' values that are all zeros or
         * equal to spec, doing so would give the user no additional
         * functionality and would only add complexity for us.
         */
        if (i->last)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last is not supported by chelsio pmd");
        return 0;
}

/**
 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
 * there are only 40 bits available to store match fields.
 * So, to save space, optimize the filter spec for some common
 * known fields that hardware can parse against incoming
 * packets automatically.
 */
static void
cxgbe_tweak_filter_spec(struct adapter *adap,
                        struct ch_filter_specification *fs)
{
        /* Save 16-bit ethertype field space, by setting corresponding
         * 1-bit flags in the filter spec for common known ethertypes.
         * When hardware sees these flags, it automatically infers and
         * matches incoming packets against the corresponding ethertype.
         */
        if (fs->mask.ethtype == 0xffff) {
                switch (fs->val.ethtype) {
                case RTE_ETHER_TYPE_IPV4:
                        if (adap->params.tp.ethertype_shift < 0) {
                                fs->type = FILTER_TYPE_IPV4;
                                fs->val.ethtype = 0;
                                fs->mask.ethtype = 0;
                        }
                        break;
                case RTE_ETHER_TYPE_IPV6:
                        if (adap->params.tp.ethertype_shift < 0) {
                                fs->type = FILTER_TYPE_IPV6;
                                fs->val.ethtype = 0;
                                fs->mask.ethtype = 0;
                        }
                        break;
                case RTE_ETHER_TYPE_VLAN:
                        if (adap->params.tp.ethertype_shift < 0 &&
                            adap->params.tp.vlan_shift >= 0) {
                                fs->val.ivlan_vld = 1;
                                fs->mask.ivlan_vld = 1;
                                fs->val.ethtype = 0;
                                fs->mask.ethtype = 0;
                        }
                        break;
                default:
                        break;
                }
        }
}

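/*
 * Example (illustrative): with mask.ethtype == 0xffff and
 * val.ethtype == RTE_ETHER_TYPE_IPV4 on hardware whose TP ntuple has no
 * ethertype field (ethertype_shift < 0), the ethertype match is folded
 * into fs->type = FILTER_TYPE_IPV4 and the 16-bit ethtype val/mask are
 * cleared, freeing that space in the 40-bit match region.
 */
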
static void
cxgbe_fill_filter_region(struct adapter *adap,
                         struct ch_filter_specification *fs)
{
        struct tp_params *tp = &adap->params.tp;
        u64 hash_filter_mask = tp->hash_filter_mask;
        u64 ntuple_mask = 0;

        fs->cap = 0;

        if (!is_hashfilter(adap))
                return;

        if (fs->type) {
                uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff};
                uint8_t bitoff[16] = {0};

                if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, biton, sizeof(biton)))
                        return;
        } else {
                uint32_t biton  = 0xffffffff;
                uint32_t bitoff = 0x0U;

                if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, &biton, sizeof(biton)))
                        return;
        }

        if (!fs->val.lport || fs->mask.lport != 0xffff)
                return;
        if (!fs->val.fport || fs->mask.fport != 0xffff)
                return;

        if (tp->protocol_shift >= 0)
                ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
        if (tp->ethertype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
        if (tp->port_shift >= 0)
                ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
        if (tp->macmatch_shift >= 0)
                ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
        if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
                ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
                               tp->vlan_shift;

        if (ntuple_mask != hash_filter_mask)
                return;

        fs->cap = 1;    /* use hash region */
}

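/*
 * Worked example (hypothetical shift values): with protocol_shift == 0
 * and ethertype_shift == 8, a spec carrying mask.proto == 0xff and
 * mask.ethtype == 0xffff assembles ntuple_mask == 0xffffff. The filter
 * is steered to the hash region (fs->cap = 1) only when this computed
 * mask exactly equals the hash_filter_mask configured in firmware;
 * otherwise it stays in the TCAM region.
 */
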
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *umask = item->mask;
        const struct rte_flow_item_eth *mask;

        /* If user has not given any mask, then use chelsio supported mask. */
        mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

        if (!spec)
                return 0;

        /* Chelsio hardware supports matching on only one ethertype
         * (i.e. either the outer or inner ethertype, but not both). If
         * we have already encountered a VLAN item, then ensure that the
         * outer ethertype is VLAN (0x8100) and don't overwrite the inner
         * ethertype stored during VLAN item parsing. Note that if the
         * 'ivlan_vld' bit is set in the Chelsio filter spec, the
         * hardware automatically matches only packets whose outer
         * ethertype is VLAN (0x8100).
         */
        if (fs->mask.ivlan_vld &&
            be16_to_cpu(spec->type) != RTE_ETHER_TYPE_VLAN)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Already encountered VLAN item,"
                                          " but outer ethertype is not 0x8100");

        /* We don't support SRC_MAC filtering. */
        if (!rte_is_zero_ether_addr(&mask->src))
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "src mac filtering not supported");

        if (!rte_is_zero_ether_addr(&mask->dst)) {
                const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
                const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
                struct rte_flow *flow = (struct rte_flow *)fs->private;
                struct port_info *pi = (struct port_info *)
                                        (flow->dev->data->dev_private);
                int idx;

                idx = cxgbe_mpstcam_alloc(pi, addr, m);
                if (idx <= 0)
                        return rte_flow_error_set(e, idx,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unable to allocate mac"
                                                  " entry in h/w");
                CXGBE_FILL_FS(idx, 0x1ff, macidx);
        }

        /* Only set the outer ethertype if we haven't encountered a VLAN
         * item yet. Otherwise, the inner ethertype set by the VLAN item
         * would get overwritten.
         */
        if (!fs->mask.ivlan_vld)
                CXGBE_FILL_FS(be16_to_cpu(spec->type),
                              be16_to_cpu(mask->type), ethtype);
        return 0;
}

static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_phy_port *val = item->spec;
        const struct rte_flow_item_phy_port *umask = item->mask;
        const struct rte_flow_item_phy_port *mask;

        mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

        if (val->index > 0x7)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "port index up to 0x7 is supported");

        CXGBE_FILL_FS(val->index, mask->index, iport);

        return 0;
}

static int
ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *umask = item->mask;
        const struct rte_flow_item_vlan *mask;

        /* If user has not given any mask, then use chelsio supported mask. */
        mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;

        CXGBE_FILL_FS(1, 1, ivlan_vld);
        if (!spec)
                return 0; /* Wildcard, match all VLAN */

        /* Chelsio hardware supports matching on only one ethertype
         * (i.e. either the outer or inner ethertype, but not both).
         * If outer ethertype is already set and is not VLAN (0x8100),
         * then don't proceed further. Otherwise, reset the outer
         * ethertype, so that it can be replaced by inner ethertype.
         * Note that the hardware will automatically match on outer
         * ethertype 0x8100, if 'ivlan_vld' bit is set in Chelsio
         * filter spec.
         */
        if (fs->mask.ethtype) {
                if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "Outer ethertype not 0x8100");

                fs->val.ethtype = 0;
                fs->mask.ethtype = 0;
        }

        CXGBE_FILL_FS(be16_to_cpu(spec->tci), be16_to_cpu(mask->tci), ivlan);
        if (spec->inner_type)
                CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
                              be16_to_cpu(mask->inner_type), ethtype);

        return 0;
}

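/*
 * Example (illustrative): for the pattern ETH / VLAN / IPV4, the ETH
 * item's type is 0x8100, the VLAN parser above sets ivlan_vld and clears
 * the outer ethertype, and the VLAN item's inner_type (e.g. 0x0800)
 * becomes the ethtype that gets matched. Hardware matches the outer
 * 0x8100 implicitly through ivlan_vld.
 */
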
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_udp *val = item->spec;
        const struct rte_flow_item_udp *umask = item->mask;
        const struct rte_flow_item_udp *mask;

        mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

        if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "udp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_tcp *val = item->spec;
        const struct rte_flow_item_tcp *umask = item->mask;
        const struct rte_flow_item_tcp *mask;

        mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

        if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
            mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
            mask->hdr.tcp_urp)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tcp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv4 *val = item->spec;
        const struct rte_flow_item_ipv4 *umask = item->mask;
        const struct rte_flow_item_ipv4 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

        if (mask->hdr.time_to_live || mask->hdr.type_of_service)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "ttl/tos are not supported");

        if (fs->mask.ethtype &&
            (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
             fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Couldn't find IPv4 ethertype");
        fs->type = FILTER_TYPE_IPV4;
        if (!val)
                return 0; /* ipv4 wild card */

        CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv6 *val = item->spec;
        const struct rte_flow_item_ipv6 *umask = item->mask;
        const struct rte_flow_item_ipv6 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

        if (mask->hdr.vtc_flow ||
            mask->hdr.payload_len || mask->hdr.hop_limits)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tc/flow/hop are not supported");

        if (fs->mask.ethtype &&
            (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
             fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Couldn't find IPv6 ethertype");
        fs->type = FILTER_TYPE_IPV6;
        if (!val)
                return 0; /* ipv6 wild card */

        CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
                      struct rte_flow_error *e)
{
        if (attr->egress)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "attribute:<egress> is"
                                          " not supported!");
        if (attr->group > 0)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "group parameter is"
                                          " not supported.");

        flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

        return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
        struct port_info *pi = ethdev2pinfo(dev);

        if (rxq > pi->n_rx_qsets)
                return -EINVAL;
        return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct ch_filter_specification fs = f->fs;
        u8 nentries;

        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "invalid flow index %d.\n", fidx);
                return -EINVAL;
        }

        nentries = cxgbe_filter_slots(adap, fs.type);
        if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
                dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
                return -EINVAL;
        }

        return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
                         struct adapter *adap, unsigned int fidx)
{
        u8 nentries;

        nentries = cxgbe_filter_slots(adap, fs->type);
        if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
                dev_err(adap, "filter index: %d is busy.\n", fidx);
                return -EBUSY;
        }

        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "filter index (%u) >= max(%u)\n",
                        fidx, adap->tids.nftids);
                return -ERANGE;
        }

        return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
        if (flow->fs.cap)
                return 0; /* Hash filters */
        return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
                cxgbe_validate_fidxonadd(&flow->fs,
                                         ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(flow->dev);

        /* For tcam get the next available slot, if default value specified */
        if (flow->fidx == FILTER_ID_MAX) {
                u8 nentries;
                int idx;

                nentries = cxgbe_filter_slots(adap, fs->type);
                idx = cxgbe_alloc_ftid(adap, nentries);
                if (idx < 0) {
                        dev_err(adap, "unable to get a filter index in tcam\n");
                        return -ENOMEM;
                }
                *fidx = (unsigned int)idx;
        } else {
                *fidx = flow->fidx;
        }

        return 0;
}

static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
        const struct rte_flow_item *i;
        int j, index = -ENOENT;

        for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
                if (i->type == type) {
                        index = j;
                        break;
                }
        }

        return index;
}

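/*
 * Usage sketch: cxgbe_get_flow_item_index(items, RTE_FLOW_ITEM_TYPE_IPV4)
 * returns the position of the first IPv4 item in the pattern, or -ENOENT
 * when none is present. The NAT action parser below relies on this to
 * reject, e.g., SET_IPV4_SRC when the pattern never matched an IPv4
 * header.
 */
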
static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
        /* nmode:
         * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
         * BIT_2 = [src_port], BIT_3 = [dst_port]
         *
         * Only the cases below are supported, as per our spec.
         */
        switch (nmode) {
        case 0:  /* 0000b */
                fs->nat_mode = NAT_MODE_NONE;
                break;
        case 2:  /* 0010b */
                fs->nat_mode = NAT_MODE_DIP;
                break;
        case 5:  /* 0101b */
                fs->nat_mode = NAT_MODE_SIP_SP;
                break;
        case 7:  /* 0111b */
                fs->nat_mode = NAT_MODE_DIP_SIP_SP;
                break;
        case 10: /* 1010b */
                fs->nat_mode = NAT_MODE_DIP_DP;
                break;
        case 11: /* 1011b */
                fs->nat_mode = NAT_MODE_DIP_DP_SIP;
                break;
        case 14: /* 1110b */
                fs->nat_mode = NAT_MODE_DIP_DP_SP;
                break;
        case 15: /* 1111b */
                fs->nat_mode = NAT_MODE_ALL;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

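/*
 * Worked example: the actions SET_IPV4_SRC and SET_TP_SRC set BIT_0 and
 * BIT_2 respectively, so nmode == 0101b (5) and the filter is programmed
 * with NAT_MODE_SIP_SP. An unsupported combination such as 0100b
 * (rewrite the source port alone) falls through to the default case and
 * is rejected with -EINVAL.
 */
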
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
                          const struct rte_flow_item items[],
                          uint8_t *nmode,
                          struct ch_filter_specification *fs,
                          struct rte_flow_error *e)
{
        const struct rte_flow_action_of_set_vlan_vid *vlanid;
        const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
        const struct rte_flow_action_of_push_vlan *pushvlan;
        const struct rte_flow_action_set_ipv4 *ipv4;
        const struct rte_flow_action_set_ipv6 *ipv6;
        const struct rte_flow_action_set_tp *tp_port;
        const struct rte_flow_action_phy_port *port;
        int item_index;
        u16 tmp_vlan;

        switch (a->type) {
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
                          a->conf;
                /* If explicitly asked to push a new VLAN header,
                 * then don't set rewrite mode. Otherwise, the
                 * incoming VLAN packets will get their VLAN fields
                 * rewritten, instead of adding an additional outer
                 * VLAN header.
                 */
                if (fs->newvlan != VLAN_INSERT)
                        fs->newvlan = VLAN_REWRITE;
                tmp_vlan = fs->vlan & 0xe000;
                fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
                break;
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
                          a->conf;
                /* If explicitly asked to push a new VLAN header,
                 * then don't set rewrite mode. Otherwise, the
                 * incoming VLAN packets will get their VLAN fields
                 * rewritten, instead of adding an additional outer
                 * VLAN header.
                 */
                if (fs->newvlan != VLAN_INSERT)
                        fs->newvlan = VLAN_REWRITE;
                tmp_vlan = fs->vlan & 0xfff;
                fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
                break;
        case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                pushvlan = (const struct rte_flow_action_of_push_vlan *)
                            a->conf;
                if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "only ethertype 0x8100 "
                                                  "supported for push vlan.");
                fs->newvlan = VLAN_INSERT;
                break;
        case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                fs->newvlan = VLAN_REMOVE;
                break;
        case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                port = (const struct rte_flow_action_phy_port *)a->conf;
                fs->eport = port->index;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV4);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV4 "
                                                  "found.");

                ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
                memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
                *nmode |= 1 << 0;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV4);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV4 "
                                                  "found.");

                ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
                memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
                *nmode |= 1 << 1;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV6);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV6 "
                                                  "found.");

                ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
                memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
                *nmode |= 1 << 0;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV6);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV6 "
                                                  "found.");

                ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
                memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
                *nmode |= 1 << 1;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_TCP);
                if (item_index < 0) {
                        item_index =
                                cxgbe_get_flow_item_index(items,
                                                RTE_FLOW_ITEM_TYPE_UDP);
                        if (item_index < 0)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "No RTE_FLOW_ITEM_TYPE_TCP or "
                                                "RTE_FLOW_ITEM_TYPE_UDP found");
                }

                tp_port = (const struct rte_flow_action_set_tp *)a->conf;
                fs->nat_fport = be16_to_cpu(tp_port->port);
                *nmode |= 1 << 2;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_TCP);
                if (item_index < 0) {
                        item_index =
                                cxgbe_get_flow_item_index(items,
                                                RTE_FLOW_ITEM_TYPE_UDP);
                        if (item_index < 0)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "No RTE_FLOW_ITEM_TYPE_TCP or "
                                                "RTE_FLOW_ITEM_TYPE_UDP found");
                }

                tp_port = (const struct rte_flow_action_set_tp *)a->conf;
                fs->nat_lport = be16_to_cpu(tp_port->port);
                *nmode |= 1 << 3;
                break;
        case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_ETH);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_ETH "
                                                  "found");
                fs->swapmac = 1;
                break;
        default:
                /* We are not supposed to come here */
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "Action not supported");
        }

        return 0;
}

static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
                         const struct rte_flow_item items[],
                         const struct rte_flow_action action[],
                         struct rte_flow_error *e)
{
        struct ch_filter_specification *fs = &flow->fs;
        uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
        uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
        const struct rte_flow_action_queue *q;
        const struct rte_flow_action *a;
        char abit = 0;
        int ret;

        for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
                switch (a->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        q = (const struct rte_flow_action_queue *)a->conf;
                        if (!q)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "specify rx queue index");
                        if (check_rxq(flow->dev, q->index))
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "Invalid rx queue");
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_PASS;
                        fs->dirsteer = 1;
                        fs->iq = q->index;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        fs->hitcnts = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                        vlan_set_vid++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                        vlan_set_pcp++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        nat_ipv4++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        nat_ipv6++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
                        /* We allow multiple switch actions, but switch is
                         * not compatible with either queue or drop
                         */
                        if (abit++ && fs->action != FILTER_SWITCH)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "overlapping action specified");
                        if (nat_ipv4 && nat_ipv6)
                                return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "Can't have one address ipv4 and the"
                                        " other ipv6");

                        ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
                                                        e);
                        if (ret)
                                return ret;
                        fs->action = FILTER_SWITCH;
                        break;
                default:
                        /* Not supported action : return error */
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  a, "Action not supported");
                }
        }

        if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "Both OF_SET_VLAN_VID and "
                                          "OF_SET_VLAN_PCP must be specified");

        if (ch_rte_parse_nat(nmode, fs))
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "invalid settings for switch action");
        return 0;
}

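/*
 * Example: QUEUE + COUNT is a valid combination, since COUNT only sets
 * hitcnts and does not consume the single pass/drop slot tracked by
 * 'abit'. QUEUE + DROP, or two QUEUE actions, trip the "specify only 1
 * pass/drop" check; switch-type actions may repeat, but are not allowed
 * alongside QUEUE or DROP.
 */
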
static struct chrte_fparse parseitem[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .fptr  = ch_rte_parsetype_eth,
                .dmask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0xffff,
                }
        },

        [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
                .fptr = ch_rte_parsetype_port,
                .dmask = &(const struct rte_flow_item_phy_port){
                        .index = 0x7,
                }
        },

        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .fptr = ch_rte_parsetype_vlan,
                .dmask = &(const struct rte_flow_item_vlan){
                        .tci = 0xffff,
                        .inner_type = 0xffff,
                }
        },

        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .fptr  = ch_rte_parsetype_ipv4,
                .dmask = &rte_flow_item_ipv4_mask,
        },

        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .fptr  = ch_rte_parsetype_ipv6,
                .dmask = &rte_flow_item_ipv6_mask,
        },

        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .fptr  = ch_rte_parsetype_udp,
                .dmask = &rte_flow_item_udp_mask,
        },

        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .fptr  = ch_rte_parsetype_tcp,
                .dmask = &rte_flow_item_tcp_mask,
        },
};

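/*
 * Note: 'dmask' supplies the default match mask whenever an item carries
 * no mask of its own. For example, an ETH item with only a spec matches
 * the full destination MAC (ff:ff:ff:ff:ff:ff) but ignores the source
 * MAC entirely, per the defaults above.
 */
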
static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
                       const struct rte_flow_item items[],
                       struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        const struct rte_flow_item *i;
        char repeat[ARRAY_SIZE(parseitem)] = {0};

        for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
                struct chrte_fparse *idx;
                int ret;

                if (i->type >= ARRAY_SIZE(parseitem))
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  i, "Item not supported");

                switch (i->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        continue;
                default:
                        /* check if item is repeated */
                        if (repeat[i->type])
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "parse items cannot be repeated (except void)");
                        repeat[i->type] = 1;

                        /* No spec found for this pattern item. Skip it */
                        if (!i->spec)
                                break;

                        /* validate the item */
                        ret = cxgbe_validate_item(i, e);
                        if (ret)
                                return ret;

                        idx = &flow->item_parser[i->type];
                        if (!idx || !idx->fptr) {
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "Item not supported");
                        } else {
                                ret = idx->fptr(idx->dmask, i, &flow->fs, e);
                                if (ret)
                                        return ret;
                        }
                }
        }

        cxgbe_fill_filter_region(adap, &flow->fs);
        cxgbe_tweak_filter_spec(adap, &flow->fs);

        return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item item[],
                 const struct rte_flow_action action[],
                 struct rte_flow_error *e)
{
        int ret;
        /* parse user request into ch_filter_specification */
        ret = cxgbe_rtef_parse_attr(flow, attr, e);
        if (ret)
                return ret;
        ret = cxgbe_rtef_parse_items(flow, item, e);
        if (ret)
                return ret;
        return cxgbe_rtef_parse_actions(flow, item, action, e);
}

static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(dev);
        struct tid_info *t = &adap->tids;
        struct filter_ctx ctx;
        unsigned int fidx;
        int err;

        if (cxgbe_get_fidx(flow, &fidx))
                return -ENOMEM;
        if (cxgbe_verify_fidx(flow, fidx, 0))
                return -1;

        t4_init_completion(&ctx.completion);
        /* go create the filter */
        err = cxgbe_set_filter(dev, fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while creating filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_MS,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter set operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while creating the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        if (fs->cap) { /* to destroy the filter */
                flow->fidx = ctx.tid;
                flow->f = lookup_tid(t, ctx.tid);
        } else {
                flow->fidx = fidx;
                flow->f = &adap->tids.ftid_tab[fidx];
        }

        return 0;
}

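/*
 * Example: for a hash-region filter (fs->cap set), the index stored in
 * flow->fidx is the hardware tid returned by firmware in ctx.tid; for a
 * TCAM filter it is the ftid slot picked by cxgbe_get_fidx(), and the
 * filter_entry lives in adap->tids.ftid_tab.
 */
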
static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item item[],
                  const struct rte_flow_action action[],
                  struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        struct rte_flow *flow;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow) {
                rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to allocate memory for"
                                   " filter_entry");
                return NULL;
        }

        flow->item_parser = parseitem;
        flow->dev = dev;
        flow->fs.private = (void *)flow;

        if (cxgbe_flow_parse(flow, attr, item, action, e)) {
                t4_os_free(flow);
                return NULL;
        }

        t4_os_lock(&adap->flow_lock);
        /* go, interact with cxgbe_filter */
        ret = __cxgbe_flow_create(dev, flow);
        t4_os_unlock(&adap->flow_lock);
        if (ret) {
                rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to create flow rule");
                t4_os_free(flow);
                return NULL;
        }

        flow->f->private = flow; /* Will be used during flush */

        return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct adapter *adap = ethdev2adap(dev);
        struct filter_entry *f = flow->f;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int err;

        fs = &f->fs;
        if (cxgbe_verify_fidx(flow, flow->fidx, 1))
                return -1;

        t4_init_completion(&ctx.completion);
        err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while deleting filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_MS,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter delete operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while deleting the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        fs = &flow->fs;
        if (fs->mask.macidx) {
                struct port_info *pi = (struct port_info *)
                                        (dev->data->dev_private);
                int ret;

                ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
                if (!ret)
                        return ret;
        }

        return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        int ret;

        t4_os_lock(&adap->flow_lock);
        ret = __cxgbe_flow_destroy(dev, flow);
        t4_os_unlock(&adap->flow_lock);
        if (ret)
                return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "error destroying filter.");
        t4_os_free(flow);
        return 0;
}

static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
                              u64 *byte_count)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        struct ch_filter_specification fs = flow->f->fs;
        unsigned int fidx = flow->fidx;
        int ret = 0;

        ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
        if (ret)
                return ret;
        return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                 const struct rte_flow_action *action, void *data,
                 struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        struct ch_filter_specification fs;
        struct rte_flow_query_count *c;
        struct filter_entry *f;
        int ret;

        RTE_SET_USED(dev);

        f = flow->f;
        fs = f->fs;

        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                return rte_flow_error_set(e, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "only count supported for query");

        /*
         * This is a valid operation, since we are allowed to do Chelsio
         * specific operations on the rte side of our code, but not
         * vice-versa.
         *
         * So, fs can be queried/modified here, BUT rte_flow_query_count
         * cannot be worked on by the lower layer, since we want to keep
         * it rte_flow agnostic.
         */
        if (!fs.hitcnts)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                          &fs, "filter hit counters were not"
                                          " enabled during filter creation");

        c = (struct rte_flow_query_count *)data;

        t4_os_lock(&adap->flow_lock);
        ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
        if (ret) {
                rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
                                   f, "cxgbe pmd failed to perform query");
                goto out;
        }

        /* Query was successful */
        c->bytes_set = 1;
        c->hits_set = 1;
        if (c->reset)
                cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);

out:
        t4_os_unlock(&adap->flow_lock);
        return ret;
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item item[],
                    const struct rte_flow_action action[],
                    struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        struct rte_flow *flow;
        unsigned int fidx;
        int ret = 0;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow)
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "Unable to allocate memory for filter_entry");

        flow->item_parser = parseitem;
        flow->dev = dev;

        ret = cxgbe_flow_parse(flow, attr, item, action, e);
        if (ret) {
                t4_os_free(flow);
                return ret;
        }

        if (cxgbe_validate_filter(adap, &flow->fs)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "validation failed. Check f/w config file.");
        }

        t4_os_lock(&adap->flow_lock);
        if (cxgbe_get_fidx(flow, &fidx)) {
                ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                         NULL, "no memory in tcam.");
                goto out;
        }

        if (cxgbe_verify_fidx(flow, fidx, 0)) {
                ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                         NULL, "validation failed");
                goto out;
        }

out:
        t4_os_unlock(&adap->flow_lock);
        t4_os_free(flow);
        return ret;
}

/*
 * @ret: == 0 filter destroyed successfully
 *       <  0 error destroying filter
 *       == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
{
        if (f && (f->valid || f->pending) &&
            f->dev == dev && /* Only if user has asked for this port */
            f->private) /* We (rte_flow) created this filter */
                return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
        return 1;
}

static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        unsigned int i;
        int ret = 0;

        t4_os_lock(&adap->flow_lock);
        if (adap->tids.ftid_tab) {
                struct filter_entry *f = &adap->tids.ftid_tab[0];

                for (i = 0; i < adap->tids.nftids; i++, f++) {
                        ret = cxgbe_check_n_destroy(f, dev);
                        if (ret < 0) {
                                rte_flow_error_set(e, ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   f->private,
                                                   "error destroying TCAM "
                                                   "filter.");
                                goto out;
                        }
                }
        }

        if (is_hashfilter(adap) && adap->tids.tid_tab) {
                struct filter_entry *f;

                for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
                        f = (struct filter_entry *)adap->tids.tid_tab[i];

                        ret = cxgbe_check_n_destroy(f, dev);
                        if (ret < 0) {
                                rte_flow_error_set(e, ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   f->private,
                                                   "error destroying HASH "
                                                   "filter.");
                                goto out;
                        }
                }
        }

out:
        t4_os_unlock(&adap->flow_lock);
        return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
        .validate       = cxgbe_flow_validate,
        .create         = cxgbe_flow_create,
        .destroy        = cxgbe_flow_destroy,
        .flush          = cxgbe_flow_flush,
        .query          = cxgbe_flow_query,
        .isolate        = NULL,
};

int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                      enum rte_filter_type filter_type,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        int ret = 0;

        RTE_SET_USED(dev);
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &cxgbe_flow_ops;
                break;
        default:
                ret = -ENOTSUP;
                break;
        }
        return ret;
}
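
#if 0
/*
 * Application-side usage sketch (illustrative only; not part of this
 * driver and not compiled). It shows how an application reaches
 * cxgbe_flow_ops above through the generic rte_flow API: steer IPv4/TCP
 * packets with destination port 80 to rx queue 1 and count hits. The
 * function name, port_id, and queue index are assumptions of the
 * example.
 */
static int example_steer_http(uint16_t port_id, struct rte_flow **out)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_tcp tcp_spec = { .hdr.dst_port = RTE_BE16(80) };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.dst_port = RTE_BE16(0xffff),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* Validate first; on success, create the rule. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
                return -1;
        *out = rte_flow_create(port_id, &attr, pattern, actions, &err);
        return *out ? 0 : -1;
}
#endif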