net/cxgbe: fix double MPS alloc by flow validate and create
[dpdk.git] / drivers / net / cxgbe / cxgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include "base/common.h"
6 #include "cxgbe_flow.h"
7
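/* Helpers to fill a match field in the Chelsio filter specification (fs).
 * __CXGBE_FILL_FS() rejects an item that redefines an already-set field
 * with a different value; __CXGBE_FILL_FS_MEMCPY() copies multi-byte
 * fields (e.g. MAC/IP addresses) into the value and mask arrays.
 */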
8 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
9 do { \
10         if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
11                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
12                                           NULL, "Redefined match item with" \
13                                           " different values found"); \
14         (fs)->val.elem = (__v); \
15         (fs)->mask.elem = (__m); \
16 } while (0)
17
18 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
19 do { \
20         memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
21         memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
22 } while (0)
23
24 #define CXGBE_FILL_FS(v, m, elem) \
25         __CXGBE_FILL_FS(v, m, fs, elem, e)
26
27 #define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
28         __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
29
30 static int
31 cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
32 {
33         /* rte_flow specification does not allow it. */
34         if (!i->spec && (i->mask || i->last))
35                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
36                                    i, "last or mask given without spec");
37         /*
38          * We don't support it.
39          * Although we could support values in last as all 0's or last == spec,
40          * doing so would not provide the user with any additional functionality
41          * and would only increase the complexity for us.
42          */
43         if (i->last)
44                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
45                                    i, "last is not supported by chelsio pmd");
46         return 0;
47 }
48
49 /**
50  * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
51  * there are only 40 bits available to store match fields.
52  * So, to save space, optimize filter spec for some common
53  * known fields that hardware can parse against incoming
54  * packets automatically.
55  */
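/* For example, when the ethertype field is not part of the TP ntuple
 * configuration (ethertype_shift < 0), an exact-match ethertype of
 * 0x0800 (IPv4) is folded into fs->type = FILTER_TYPE_IPV4 and the
 * explicit ethertype match is cleared, freeing those 16 bits.
 */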
56 static void
57 cxgbe_tweak_filter_spec(struct adapter *adap,
58                         struct ch_filter_specification *fs)
59 {
60         /* Save 16-bit ethertype field space, by setting corresponding
61          * 1-bit flags in the filter spec for common known ethertypes.
62          * When hardware sees these flags, it automatically infers and
63          * matches incoming packets against the corresponding ethertype.
64          */
65         if (fs->mask.ethtype == 0xffff) {
66                 switch (fs->val.ethtype) {
67                 case RTE_ETHER_TYPE_IPV4:
68                         if (adap->params.tp.ethertype_shift < 0) {
69                                 fs->type = FILTER_TYPE_IPV4;
70                                 fs->val.ethtype = 0;
71                                 fs->mask.ethtype = 0;
72                         }
73                         break;
74                 case RTE_ETHER_TYPE_IPV6:
75                         if (adap->params.tp.ethertype_shift < 0) {
76                                 fs->type = FILTER_TYPE_IPV6;
77                                 fs->val.ethtype = 0;
78                                 fs->mask.ethtype = 0;
79                         }
80                         break;
81                 case RTE_ETHER_TYPE_VLAN:
82                         if (adap->params.tp.ethertype_shift < 0 &&
83                             adap->params.tp.vlan_shift >= 0) {
84                                 fs->val.ivlan_vld = 1;
85                                 fs->mask.ivlan_vld = 1;
86                                 fs->val.ethtype = 0;
87                                 fs->mask.ethtype = 0;
88                         }
89                         break;
90                 case RTE_ETHER_TYPE_QINQ:
91                         if (adap->params.tp.ethertype_shift < 0 &&
92                             adap->params.tp.vnic_shift >= 0) {
93                                 fs->val.ovlan_vld = 1;
94                                 fs->mask.ovlan_vld = 1;
95                                 fs->val.ethtype = 0;
96                                 fs->mask.ethtype = 0;
97                         }
98                         break;
99                 default:
100                         break;
101                 }
102         }
103 }
104
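/* Decide whether this filter can go into the hash (exact-match) region.
 * That requires hash-filter support, exact matches on both IP addresses
 * and both ports, and an ntuple mask identical to the mask the hardware
 * was configured with.  fs->cap is set to 1 to request the hash region;
 * otherwise the filter stays in the LE-TCAM (maskfull) region.
 */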
105 static void
106 cxgbe_fill_filter_region(struct adapter *adap,
107                          struct ch_filter_specification *fs)
108 {
109         struct tp_params *tp = &adap->params.tp;
110         u64 hash_filter_mask = tp->hash_filter_mask;
111         u64 ntuple_mask = 0;
112
113         fs->cap = 0;
114
115         if (!is_hashfilter(adap))
116                 return;
117
118         if (fs->type) {
119                 uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
120                                      0xff, 0xff, 0xff, 0xff,
121                                      0xff, 0xff, 0xff, 0xff,
122                                      0xff, 0xff, 0xff, 0xff};
123                 uint8_t bitoff[16] = {0};
124
125                 if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
126                     !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
127                     memcmp(fs->mask.lip, biton, sizeof(biton)) ||
128                     memcmp(fs->mask.fip, biton, sizeof(biton)))
129                         return;
130         } else {
131                 uint32_t biton  = 0xffffffff;
132                 uint32_t bitoff = 0x0U;
133
134                 if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
135                     !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
136                     memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
137                     memcmp(fs->mask.fip, &biton, sizeof(biton)))
138                         return;
139         }
140
141         if (!fs->val.lport || fs->mask.lport != 0xffff)
142                 return;
143         if (!fs->val.fport || fs->mask.fport != 0xffff)
144                 return;
145
146         if (tp->protocol_shift >= 0)
147                 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
148         if (tp->ethertype_shift >= 0)
149                 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
150         if (tp->port_shift >= 0)
151                 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
152         if (tp->macmatch_shift >= 0)
153                 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
154         if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
155                 ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
156                                tp->vlan_shift;
157         if (tp->vnic_shift >= 0) {
158                 if (fs->mask.ovlan_vld)
159                         ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
160                                              fs->mask.ovlan) << tp->vnic_shift;
161                 else if (fs->mask.pfvf_vld)
162                         ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
163                                              fs->mask.pf << 13 |
164                                              fs->mask.vf) << tp->vnic_shift;
165         }
166         if (tp->tos_shift >= 0)
167                 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
168
169         if (ntuple_mask != hash_filter_mask)
170                 return;
171
172         fs->cap = 1;    /* use hash region */
173 }
174
175 static int
176 ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
177                      struct ch_filter_specification *fs,
178                      struct rte_flow_error *e)
179 {
180         const struct rte_flow_item_eth *spec = item->spec;
181         const struct rte_flow_item_eth *umask = item->mask;
182         const struct rte_flow_item_eth *mask;
183
184         /* If user has not given any mask, then use chelsio supported mask. */
185         mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
186
187         if (!spec)
188                 return 0;
189
190         /* We don't support SRC_MAC filtering. */
191         if (!rte_is_zero_ether_addr(&mask->src))
192                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
193                                           item,
194                                           "src mac filtering not supported");
195
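        /* Match on the destination MAC.  The MPS (MAC match) table entry
         * backing this DMAC is allocated later, when the filter is actually
         * written to hardware, so that flow validate and flow create do not
         * each allocate an MPS entry of their own -- which is what the
         * commit title above ("fix double MPS alloc") refers to.
         */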
196         if (!rte_is_zero_ether_addr(&mask->dst)) {
197                 CXGBE_FILL_FS(0, 0x1ff, macidx);
198                 CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
199                                      dmac);
200         }
201
202         CXGBE_FILL_FS(be16_to_cpu(spec->type),
203                       be16_to_cpu(mask->type), ethtype);
204
205         return 0;
206 }
207
208 static int
209 ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
210                       struct ch_filter_specification *fs,
211                       struct rte_flow_error *e)
212 {
213         const struct rte_flow_item_phy_port *val = item->spec;
214         const struct rte_flow_item_phy_port *umask = item->mask;
215         const struct rte_flow_item_phy_port *mask;
216
217         mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
218
219         if (!val)
220                 return 0; /* Wildcard, match all physical ports */
221
222         if (val->index > 0x7)
223                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
224                                           item,
225                                           "port index up to 0x7 is supported");
226
227         CXGBE_FILL_FS(val->index, mask->index, iport);
228
229         return 0;
230 }
231
232 static int
233 ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
234                       struct ch_filter_specification *fs,
235                       struct rte_flow_error *e)
236 {
237         const struct rte_flow_item_vlan *spec = item->spec;
238         const struct rte_flow_item_vlan *umask = item->mask;
239         const struct rte_flow_item_vlan *mask;
240
241         /* If user has not given any mask, then use chelsio supported mask. */
242         mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
243
244         if (!fs->mask.ethtype)
245                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
246                                           item,
247                                           "Can't parse VLAN item without knowing ethertype");
248
249         /* If ethertype is already set and is not VLAN (0x8100) or
250          * QINQ(0x88A8), then don't proceed further. Otherwise,
251          * reset the outer ethertype, so that it can be replaced by
252          * innermost ethertype. Note that hardware will automatically
253          * match against VLAN or QINQ packets, based on 'ivlan_vld' or
254          * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
255          */
256         if (fs->mask.ethtype) {
257                 if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
258                     fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
259                         return rte_flow_error_set(e, EINVAL,
260                                                   RTE_FLOW_ERROR_TYPE_ITEM,
261                                                   item,
262                                                   "Ethertype must be 0x8100 or 0x88a8");
263         }
264
265         if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
266                 CXGBE_FILL_FS(1, 1, ovlan_vld);
267                 if (spec) {
268                         CXGBE_FILL_FS(be16_to_cpu(spec->tci),
269                                       be16_to_cpu(mask->tci), ovlan);
270
271                         fs->mask.ethtype = 0;
272                         fs->val.ethtype = 0;
273                 }
274         } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
275                 CXGBE_FILL_FS(1, 1, ivlan_vld);
276                 if (spec) {
277                         CXGBE_FILL_FS(be16_to_cpu(spec->tci),
278                                       be16_to_cpu(mask->tci), ivlan);
279
280                         fs->mask.ethtype = 0;
281                         fs->val.ethtype = 0;
282                 }
283         }
284
285         if (spec)
286                 CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
287                               be16_to_cpu(mask->inner_type), ethtype);
288
289         return 0;
290 }
291
292 static int
293 ch_rte_parsetype_pf(const void *dmask __rte_unused,
294                     const struct rte_flow_item *item __rte_unused,
295                     struct ch_filter_specification *fs,
296                     struct rte_flow_error *e __rte_unused)
297 {
298         struct rte_flow *flow = (struct rte_flow *)fs->private;
299         struct rte_eth_dev *dev = flow->dev;
300         struct adapter *adap = ethdev2adap(dev);
301
302         CXGBE_FILL_FS(1, 1, pfvf_vld);
303
304         CXGBE_FILL_FS(adap->pf, 0x7, pf);
305         return 0;
306 }
307
308 static int
309 ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
310                     struct ch_filter_specification *fs,
311                     struct rte_flow_error *e)
312 {
313         const struct rte_flow_item_vf *umask = item->mask;
314         const struct rte_flow_item_vf *val = item->spec;
315         const struct rte_flow_item_vf *mask;
316
317         /* If user has not given any mask, then use chelsio supported mask. */
318         mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
319
320         CXGBE_FILL_FS(1, 1, pfvf_vld);
321
322         if (!val)
323                 return 0; /* Wildcard, match all VFs */
324
325         if (val->id > UCHAR_MAX)
326                 return rte_flow_error_set(e, EINVAL,
327                                           RTE_FLOW_ERROR_TYPE_ITEM,
328                                           item,
329                                           "VF ID > MAX(255)");
330
331         CXGBE_FILL_FS(val->id, mask->id, vf);
332
333         return 0;
334 }
335
336 static int
337 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
338                      struct ch_filter_specification *fs,
339                      struct rte_flow_error *e)
340 {
341         const struct rte_flow_item_udp *val = item->spec;
342         const struct rte_flow_item_udp *umask = item->mask;
343         const struct rte_flow_item_udp *mask;
344
345         mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
346
347         if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
348                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
349                                           item,
350                                           "udp: only src/dst port supported");
351
352         CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
353         if (!val)
354                 return 0;
355         CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
356                       be16_to_cpu(mask->hdr.src_port), fport);
357         CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
358                       be16_to_cpu(mask->hdr.dst_port), lport);
359         return 0;
360 }
361
362 static int
363 ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
364                      struct ch_filter_specification *fs,
365                      struct rte_flow_error *e)
366 {
367         const struct rte_flow_item_tcp *val = item->spec;
368         const struct rte_flow_item_tcp *umask = item->mask;
369         const struct rte_flow_item_tcp *mask;
370
371         mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
372
373         if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
374             mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
375             mask->hdr.tcp_urp)
376                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
377                                           item,
378                                           "tcp: only src/dst port supported");
379
380         CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
381         if (!val)
382                 return 0;
383         CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
384                       be16_to_cpu(mask->hdr.src_port), fport);
385         CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
386                       be16_to_cpu(mask->hdr.dst_port), lport);
387         return 0;
388 }
389
390 static int
391 ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
392                       struct ch_filter_specification *fs,
393                       struct rte_flow_error *e)
394 {
395         const struct rte_flow_item_ipv4 *val = item->spec;
396         const struct rte_flow_item_ipv4 *umask = item->mask;
397         const struct rte_flow_item_ipv4 *mask;
398
399         mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
400
401         if (mask->hdr.time_to_live)
402                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
403                                           item, "ttl is not supported");
404
405         if (fs->mask.ethtype &&
406             (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
407                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
408                                           item,
409                                           "Couldn't find IPv4 ethertype");
410         fs->type = FILTER_TYPE_IPV4;
411         if (!val)
412                 return 0; /* ipv4 wild card */
413
414         CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
415         CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
416         CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
417         CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);
418
419         return 0;
420 }
421
422 static int
423 ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
424                       struct ch_filter_specification *fs,
425                       struct rte_flow_error *e)
426 {
427         const struct rte_flow_item_ipv6 *val = item->spec;
428         const struct rte_flow_item_ipv6 *umask = item->mask;
429         const struct rte_flow_item_ipv6 *mask;
430         u32 vtc_flow, vtc_flow_mask;
431
432         mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
433
434         vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
435
436         if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
437             mask->hdr.payload_len || mask->hdr.hop_limits)
438                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
439                                           item,
440                                           "flow/hop are not supported");
441
442         if (fs->mask.ethtype &&
443             (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
444                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
445                                           item,
446                                           "Couldn't find IPv6 ethertype");
447         fs->type = FILTER_TYPE_IPV6;
448         if (!val)
449                 return 0; /* ipv6 wild card */
450
451         CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
452
453         vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
454         CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
455                       RTE_IPV6_HDR_TC_SHIFT,
456                       (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
457                       RTE_IPV6_HDR_TC_SHIFT,
458                       tos);
459
460         CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
461         CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
462
463         return 0;
464 }
465
466 static int
467 cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
468                       struct rte_flow_error *e)
469 {
470         if (attr->egress)
471                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
472                                           attr, "attribute:<egress> is"
473                                           " not supported !");
474         if (attr->group > 0)
475                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
476                                           attr, "group parameter is"
477                                           " not supported.");
478
479         flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
480
481         return 0;
482 }
483
484 static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
485 {
486         struct port_info *pi = ethdev2pinfo(dev);
487
488         if (rxq > pi->n_rx_qsets)
489                 return -EINVAL;
490         return 0;
491 }
492
493 static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
494 {
495         struct adapter *adap = ethdev2adap(f->dev);
496         struct ch_filter_specification fs = f->fs;
497         u8 nentries;
498
499         if (fidx >= adap->tids.nftids) {
500                 dev_err(adap, "invalid flow index %d.\n", fidx);
501                 return -EINVAL;
502         }
503
504         nentries = cxgbe_filter_slots(adap, fs.type);
505         if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
506                 dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
507                 return -EINVAL;
508         }
509
510         return 0;
511 }
512
513 static int
514 cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
515                          struct adapter *adap, unsigned int fidx)
516 {
517         u8 nentries;
518
519         nentries = cxgbe_filter_slots(adap, fs->type);
520         if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
521                 dev_err(adap, "filter index: %d is busy.\n", fidx);
522                 return -EBUSY;
523         }
524
525         if (fidx >= adap->tids.nftids) {
526                 dev_err(adap, "filter index (%u) >= max(%u)\n",
527                         fidx, adap->tids.nftids);
528                 return -ERANGE;
529         }
530
531         return 0;
532 }
533
534 static int
535 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
536 {
537         if (flow->fs.cap)
538                 return 0; /* Hash filters */
539         return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
540                 cxgbe_validate_fidxonadd(&flow->fs,
541                                          ethdev2adap(flow->dev), fidx);
542 }
543
544 static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
545 {
546         struct ch_filter_specification *fs = &flow->fs;
547         struct adapter *adap = ethdev2adap(flow->dev);
548
549         /* For TCAM, get the next available slot if default value was specified */
550         if (flow->fidx == FILTER_ID_MAX) {
551                 u8 nentries;
552                 int idx;
553
554                 nentries = cxgbe_filter_slots(adap, fs->type);
555                 idx = cxgbe_alloc_ftid(adap, nentries);
556                 if (idx < 0) {
557                         dev_err(adap, "unable to get a filter index in tcam\n");
558                         return -ENOMEM;
559                 }
560                 *fidx = (unsigned int)idx;
561         } else {
562                 *fidx = flow->fidx;
563         }
564
565         return 0;
566 }
567
568 static int
569 cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
570 {
571         const struct rte_flow_item *i;
572         int j, index = -ENOENT;
573
574         for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
575                 if (i->type == type) {
576                         index = j;
577                         break;
578                 }
579         }
580
581         return index;
582 }
583
584 static int
585 ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
586 {
587         /* nmode:
588          * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
589          * BIT_2 = [src_port], BIT_3 = [dst_port]
590          *
591          * Only the cases below are supported as per our spec.
592          */
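        /* For example, SET_IPV4_DST (bit 1) together with SET_TP_DST (bit 3)
         * gives nmode = 1010b = 10, i.e. NAT_MODE_DIP_DP.
         */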
593         switch (nmode) {
594         case 0:  /* 0000b */
595                 fs->nat_mode = NAT_MODE_NONE;
596                 break;
597         case 2:  /* 0010b */
598                 fs->nat_mode = NAT_MODE_DIP;
599                 break;
600         case 5:  /* 0101b */
601                 fs->nat_mode = NAT_MODE_SIP_SP;
602                 break;
603         case 7:  /* 0111b */
604                 fs->nat_mode = NAT_MODE_DIP_SIP_SP;
605                 break;
606         case 10: /* 1010b */
607                 fs->nat_mode = NAT_MODE_DIP_DP;
608                 break;
609         case 11: /* 1011b */
610                 fs->nat_mode = NAT_MODE_DIP_DP_SIP;
611                 break;
612         case 14: /* 1110b */
613                 fs->nat_mode = NAT_MODE_DIP_DP_SP;
614                 break;
615         case 15: /* 1111b */
616                 fs->nat_mode = NAT_MODE_ALL;
617                 break;
618         default:
619                 return -EINVAL;
620         }
621
622         return 0;
623 }
624
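/* Parse one "switch domain" action (VLAN push/pop/rewrite, IP address,
 * L4 port or MAC rewrite, MAC swap, redirect to a physical port) into the
 * filter specification.  NAT-style rewrites also record which fields are
 * being rewritten in *nmode, which is later translated into a hardware
 * NAT mode by ch_rte_parse_nat().
 */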
625 static int
626 ch_rte_parse_atype_switch(const struct rte_flow_action *a,
627                           const struct rte_flow_item items[],
628                           uint8_t *nmode,
629                           struct ch_filter_specification *fs,
630                           struct rte_flow_error *e)
631 {
632         const struct rte_flow_action_of_set_vlan_vid *vlanid;
633         const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
634         const struct rte_flow_action_of_push_vlan *pushvlan;
635         const struct rte_flow_action_set_ipv4 *ipv4;
636         const struct rte_flow_action_set_ipv6 *ipv6;
637         const struct rte_flow_action_set_tp *tp_port;
638         const struct rte_flow_action_phy_port *port;
639         const struct rte_flow_action_set_mac *mac;
640         int item_index;
641         u16 tmp_vlan;
642
643         switch (a->type) {
644         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
645                 vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
646                           a->conf;
647                 /* If explicitly asked to push a new VLAN header,
648                  * then don't set rewrite mode. Otherwise, the
649                  * incoming VLAN packets will get their VLAN fields
650                  * rewritten, instead of adding an additional outer
651                  * VLAN header.
652                  */
653                 if (fs->newvlan != VLAN_INSERT)
654                         fs->newvlan = VLAN_REWRITE;
655                 tmp_vlan = fs->vlan & 0xe000;
656                 fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
657                 break;
658         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
659                 vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
660                           a->conf;
661                 /* If explicitly asked to push a new VLAN header,
662                  * then don't set rewrite mode. Otherwise, the
663                  * incoming VLAN packets will get their VLAN fields
664                  * rewritten, instead of adding an additional outer
665                  * VLAN header.
666                  */
667                 if (fs->newvlan != VLAN_INSERT)
668                         fs->newvlan = VLAN_REWRITE;
669                 tmp_vlan = fs->vlan & 0xfff;
670                 fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
671                 break;
672         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
673                 pushvlan = (const struct rte_flow_action_of_push_vlan *)
674                             a->conf;
675                 if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
676                         return rte_flow_error_set(e, EINVAL,
677                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
678                                                   "only ethertype 0x8100 "
679                                                   "supported for push vlan.");
680                 fs->newvlan = VLAN_INSERT;
681                 break;
682         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
683                 fs->newvlan = VLAN_REMOVE;
684                 break;
685         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
686                 port = (const struct rte_flow_action_phy_port *)a->conf;
687                 fs->eport = port->index;
688                 break;
689         case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
690                 item_index = cxgbe_get_flow_item_index(items,
691                                                        RTE_FLOW_ITEM_TYPE_IPV4);
692                 if (item_index < 0)
693                         return rte_flow_error_set(e, EINVAL,
694                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
695                                                   "No RTE_FLOW_ITEM_TYPE_IPV4 "
696                                                   "found.");
697
698                 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
699                 memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
700                 *nmode |= 1 << 0;
701                 break;
702         case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
703                 item_index = cxgbe_get_flow_item_index(items,
704                                                        RTE_FLOW_ITEM_TYPE_IPV4);
705                 if (item_index < 0)
706                         return rte_flow_error_set(e, EINVAL,
707                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
708                                                   "No RTE_FLOW_ITEM_TYPE_IPV4 "
709                                                   "found.");
710
711                 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
712                 memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
713                 *nmode |= 1 << 1;
714                 break;
715         case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
716                 item_index = cxgbe_get_flow_item_index(items,
717                                                        RTE_FLOW_ITEM_TYPE_IPV6);
718                 if (item_index < 0)
719                         return rte_flow_error_set(e, EINVAL,
720                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
721                                                   "No RTE_FLOW_ITEM_TYPE_IPV6 "
722                                                   "found.");
723
724                 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
725                 memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
726                 *nmode |= 1 << 0;
727                 break;
728         case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
729                 item_index = cxgbe_get_flow_item_index(items,
730                                                        RTE_FLOW_ITEM_TYPE_IPV6);
731                 if (item_index < 0)
732                         return rte_flow_error_set(e, EINVAL,
733                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
734                                                   "No RTE_FLOW_ITEM_TYPE_IPV6 "
735                                                   "found.");
736
737                 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
738                 memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
739                 *nmode |= 1 << 1;
740                 break;
741         case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
742                 item_index = cxgbe_get_flow_item_index(items,
743                                                        RTE_FLOW_ITEM_TYPE_TCP);
744                 if (item_index < 0) {
745                         item_index =
746                                 cxgbe_get_flow_item_index(items,
747                                                 RTE_FLOW_ITEM_TYPE_UDP);
748                         if (item_index < 0)
749                                 return rte_flow_error_set(e, EINVAL,
750                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
751                                                 "No RTE_FLOW_ITEM_TYPE_TCP or "
752                                                 "RTE_FLOW_ITEM_TYPE_UDP found");
753                 }
754
755                 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
756                 fs->nat_fport = be16_to_cpu(tp_port->port);
757                 *nmode |= 1 << 2;
758                 break;
759         case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
760                 item_index = cxgbe_get_flow_item_index(items,
761                                                        RTE_FLOW_ITEM_TYPE_TCP);
762                 if (item_index < 0) {
763                         item_index =
764                                 cxgbe_get_flow_item_index(items,
765                                                 RTE_FLOW_ITEM_TYPE_UDP);
766                         if (item_index < 0)
767                                 return rte_flow_error_set(e, EINVAL,
768                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
769                                                 "No RTE_FLOW_ITEM_TYPE_TCP or "
770                                                 "RTE_FLOW_ITEM_TYPE_UDP found");
771                 }
772
773                 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
774                 fs->nat_lport = be16_to_cpu(tp_port->port);
775                 *nmode |= 1 << 3;
776                 break;
777         case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
778                 item_index = cxgbe_get_flow_item_index(items,
779                                                        RTE_FLOW_ITEM_TYPE_ETH);
780                 if (item_index < 0)
781                         return rte_flow_error_set(e, EINVAL,
782                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
783                                                   "No RTE_FLOW_ITEM_TYPE_ETH "
784                                                   "found");
785                 fs->swapmac = 1;
786                 break;
787         case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
788                 item_index = cxgbe_get_flow_item_index(items,
789                                                        RTE_FLOW_ITEM_TYPE_ETH);
790                 if (item_index < 0)
791                         return rte_flow_error_set(e, EINVAL,
792                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
793                                                   "No RTE_FLOW_ITEM_TYPE_ETH "
794                                                   "found");
795                 mac = (const struct rte_flow_action_set_mac *)a->conf;
796
797                 fs->newsmac = 1;
798                 memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
799                 break;
800         case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
801                 item_index = cxgbe_get_flow_item_index(items,
802                                                        RTE_FLOW_ITEM_TYPE_ETH);
803                 if (item_index < 0)
804                         return rte_flow_error_set(e, EINVAL,
805                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
806                                                   "No RTE_FLOW_ITEM_TYPE_ETH found");
807                 mac = (const struct rte_flow_action_set_mac *)a->conf;
808
809                 fs->newdmac = 1;
810                 memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
811                 break;
812         default:
813                 /* We are not supposed to come here */
814                 return rte_flow_error_set(e, EINVAL,
815                                           RTE_FLOW_ERROR_TYPE_ACTION, a,
816                                           "Action not supported");
817         }
818
819         return 0;
820 }
821
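/* Walk the action list and fill in the filter action: exactly one of
 * pass-to-queue or drop is allowed, while multiple rewrite ("switch")
 * actions may be combined.  COUNT enables hardware hit counters.
 */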
822 static int
823 cxgbe_rtef_parse_actions(struct rte_flow *flow,
824                          const struct rte_flow_item items[],
825                          const struct rte_flow_action action[],
826                          struct rte_flow_error *e)
827 {
828         struct ch_filter_specification *fs = &flow->fs;
829         uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
830         uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
831         const struct rte_flow_action_queue *q;
832         const struct rte_flow_action *a;
833         char abit = 0;
834         int ret;
835
836         for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
837                 switch (a->type) {
838                 case RTE_FLOW_ACTION_TYPE_VOID:
839                         continue;
840                 case RTE_FLOW_ACTION_TYPE_DROP:
841                         if (abit++)
842                                 return rte_flow_error_set(e, EINVAL,
843                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
844                                                 "specify only 1 pass/drop");
845                         fs->action = FILTER_DROP;
846                         break;
847                 case RTE_FLOW_ACTION_TYPE_QUEUE:
848                         q = (const struct rte_flow_action_queue *)a->conf;
849                         if (!q)
850                                 return rte_flow_error_set(e, EINVAL,
851                                                 RTE_FLOW_ERROR_TYPE_ACTION, q,
852                                                 "specify rx queue index");
853                         if (check_rxq(flow->dev, q->index))
854                                 return rte_flow_error_set(e, EINVAL,
855                                                 RTE_FLOW_ERROR_TYPE_ACTION, q,
856                                                 "Invalid rx queue");
857                         if (abit++)
858                                 return rte_flow_error_set(e, EINVAL,
859                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
860                                                 "specify only 1 pass/drop");
861                         fs->action = FILTER_PASS;
862                         fs->dirsteer = 1;
863                         fs->iq = q->index;
864                         break;
865                 case RTE_FLOW_ACTION_TYPE_COUNT:
866                         fs->hitcnts = 1;
867                         break;
868                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
869                         vlan_set_vid++;
870                         goto action_switch;
871                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
872                         vlan_set_pcp++;
873                         goto action_switch;
874                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
875                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
876                 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
877                 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
878                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
879                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
880                         nat_ipv4++;
881                         goto action_switch;
882                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
883                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
884                         nat_ipv6++;
885                         goto action_switch;
886                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
887                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
888                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
889                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
890 action_switch:
891                         /* We allow multiple switch actions, but switch is
892                          * not compatible with either queue or drop
893                          */
894                         if (abit++ && fs->action != FILTER_SWITCH)
895                                 return rte_flow_error_set(e, EINVAL,
896                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
897                                                 "overlapping action specified");
898                         if (nat_ipv4 && nat_ipv6)
899                                 return rte_flow_error_set(e, EINVAL,
900                                         RTE_FLOW_ERROR_TYPE_ACTION, a,
901                                         "Can't have one address ipv4 and the"
902                                         " other ipv6");
903
904                         ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
905                                                         e);
906                         if (ret)
907                                 return ret;
908                         fs->action = FILTER_SWITCH;
909                         break;
910                 default:
911                         /* Not supported action: return error */
912                         return rte_flow_error_set(e, ENOTSUP,
913                                                   RTE_FLOW_ERROR_TYPE_ACTION,
914                                                   a, "Action not supported");
915                 }
916         }
917
918         if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
919                 return rte_flow_error_set(e, EINVAL,
920                                           RTE_FLOW_ERROR_TYPE_ACTION, a,
921                                           "Both OF_SET_VLAN_VID and "
922                                           "OF_SET_VLAN_PCP must be specified");
923
924         if (ch_rte_parse_nat(nmode, fs))
925                 return rte_flow_error_set(e, EINVAL,
926                                           RTE_FLOW_ERROR_TYPE_ACTION, a,
927                                           "invalid settings for swich action");
928         return 0;
929 }
930
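/* Per-item parser table, indexed by rte_flow item type.  Each entry
 * provides the parse callback and the default (Chelsio-supported) mask
 * used when the application does not supply one.
 */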
931 static struct chrte_fparse parseitem[] = {
932         [RTE_FLOW_ITEM_TYPE_ETH] = {
933                 .fptr  = ch_rte_parsetype_eth,
934                 .dmask = &(const struct rte_flow_item_eth){
935                         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
936                         .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
937                         .type = 0xffff,
938                 }
939         },
940
941         [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
942                 .fptr = ch_rte_parsetype_port,
943                 .dmask = &(const struct rte_flow_item_phy_port){
944                         .index = 0x7,
945                 }
946         },
947
948         [RTE_FLOW_ITEM_TYPE_VLAN] = {
949                 .fptr = ch_rte_parsetype_vlan,
950                 .dmask = &(const struct rte_flow_item_vlan){
951                         .tci = 0xffff,
952                         .inner_type = 0xffff,
953                 }
954         },
955
956         [RTE_FLOW_ITEM_TYPE_IPV4] = {
957                 .fptr  = ch_rte_parsetype_ipv4,
958                 .dmask = &(const struct rte_flow_item_ipv4) {
959                         .hdr = {
960                                 .src_addr = RTE_BE32(0xffffffff),
961                                 .dst_addr = RTE_BE32(0xffffffff),
962                                 .type_of_service = 0xff,
963                         },
964                 },
965         },
966
967         [RTE_FLOW_ITEM_TYPE_IPV6] = {
968                 .fptr  = ch_rte_parsetype_ipv6,
969                 .dmask = &(const struct rte_flow_item_ipv6) {
970                         .hdr = {
971                                 .src_addr =
972                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
973                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
974                                 .dst_addr =
975                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
976                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
977                                 .vtc_flow = RTE_BE32(0xff000000),
978                         },
979                 },
980         },
981
982         [RTE_FLOW_ITEM_TYPE_UDP] = {
983                 .fptr  = ch_rte_parsetype_udp,
984                 .dmask = &rte_flow_item_udp_mask,
985         },
986
987         [RTE_FLOW_ITEM_TYPE_TCP] = {
988                 .fptr  = ch_rte_parsetype_tcp,
989                 .dmask = &rte_flow_item_tcp_mask,
990         },
991
992         [RTE_FLOW_ITEM_TYPE_PF] = {
993                 .fptr = ch_rte_parsetype_pf,
994                 .dmask = NULL,
995         },
996
997         [RTE_FLOW_ITEM_TYPE_VF] = {
998                 .fptr = ch_rte_parsetype_vf,
999                 .dmask = &(const struct rte_flow_item_vf){
1000                         .id = 0xffffffff,
1001                 }
1002         },
1003 };
1004
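/* Walk the pattern items, dispatching each one to its parser from
 * parseitem[].  Repeated items are rejected (except VOID and VLAN).
 * Once the spec is built, decide between the hash and LE-TCAM regions
 * and optimize the spec for fields the hardware can infer on its own.
 */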
1005 static int
1006 cxgbe_rtef_parse_items(struct rte_flow *flow,
1007                        const struct rte_flow_item items[],
1008                        struct rte_flow_error *e)
1009 {
1010         struct adapter *adap = ethdev2adap(flow->dev);
1011         const struct rte_flow_item *i;
1012         char repeat[ARRAY_SIZE(parseitem)] = {0};
1013
1014         for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
1015                 struct chrte_fparse *idx;
1016                 int ret;
1017
1018                 if (i->type >= ARRAY_SIZE(parseitem))
1019                         return rte_flow_error_set(e, ENOTSUP,
1020                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1021                                                   i, "Item not supported");
1022
1023                 switch (i->type) {
1024                 case RTE_FLOW_ITEM_TYPE_VOID:
1025                         continue;
1026                 default:
1027                         /* check if item is repeated */
1028                         if (repeat[i->type] &&
1029                             i->type != RTE_FLOW_ITEM_TYPE_VLAN)
1030                                 return rte_flow_error_set(e, ENOTSUP,
1031                                                 RTE_FLOW_ERROR_TYPE_ITEM, i,
1032                                                 "parse items cannot be repeated(except void/vlan)");
1033
1034                         repeat[i->type] = 1;
1035
1036                         /* validate the item */
1037                         ret = cxgbe_validate_item(i, e);
1038                         if (ret)
1039                                 return ret;
1040
1041                         idx = &flow->item_parser[i->type];
1042                         if (!idx || !idx->fptr) {
1043                                 return rte_flow_error_set(e, ENOTSUP,
1044                                                 RTE_FLOW_ERROR_TYPE_ITEM, i,
1045                                                 "Item not supported");
1046                         } else {
1047                                 ret = idx->fptr(idx->dmask, i, &flow->fs, e);
1048                                 if (ret)
1049                                         return ret;
1050                         }
1051                 }
1052         }
1053
1054         cxgbe_fill_filter_region(adap, &flow->fs);
1055         cxgbe_tweak_filter_spec(adap, &flow->fs);
1056
1057         return 0;
1058 }
1059
1060 static int
1061 cxgbe_flow_parse(struct rte_flow *flow,
1062                  const struct rte_flow_attr *attr,
1063                  const struct rte_flow_item item[],
1064                  const struct rte_flow_action action[],
1065                  struct rte_flow_error *e)
1066 {
1067         int ret;
1068         /* parse user request into ch_filter_specification */
1069         ret = cxgbe_rtef_parse_attr(flow, attr, e);
1070         if (ret)
1071                 return ret;
1072         ret = cxgbe_rtef_parse_items(flow, item, e);
1073         if (ret)
1074                 return ret;
1075         return cxgbe_rtef_parse_actions(flow, item, action, e);
1076 }
1077
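/* Program the parsed filter into hardware: pick a free LE-TCAM index (or
 * let the hash region assign a tid), issue the set-filter request and
 * wait for the firmware completion before recording the resulting filter
 * entry in the flow handle.
 */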
1078 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
1079 {
1080         struct ch_filter_specification *fs = &flow->fs;
1081         struct adapter *adap = ethdev2adap(dev);
1082         struct tid_info *t = &adap->tids;
1083         struct filter_ctx ctx;
1084         unsigned int fidx;
1085         int err;
1086
1087         if (cxgbe_get_fidx(flow, &fidx))
1088                 return -ENOMEM;
1089         if (cxgbe_verify_fidx(flow, fidx, 0))
1090                 return -1;
1091
1092         t4_init_completion(&ctx.completion);
1093         /* go create the filter */
1094         err = cxgbe_set_filter(dev, fidx, fs, &ctx);
1095         if (err) {
1096                 dev_err(adap, "Error %d while creating filter.\n", err);
1097                 return err;
1098         }
1099
1100         /* Poll the FW for reply */
1101         err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1102                                         CXGBE_FLOW_POLL_MS,
1103                                         CXGBE_FLOW_POLL_CNT,
1104                                         &ctx.completion);
1105         if (err) {
1106                 dev_err(adap, "Filter set operation timed out (%d)\n", err);
1107                 return err;
1108         }
1109         if (ctx.result) {
1110                 dev_err(adap, "Hardware error %d while creating the filter.\n",
1111                         ctx.result);
1112                 return ctx.result;
1113         }
1114
1115         if (fs->cap) { /* to destroy the filter */
1116                 flow->fidx = ctx.tid;
1117                 flow->f = lookup_tid(t, ctx.tid);
1118         } else {
1119                 flow->fidx = fidx;
1120                 flow->f = &adap->tids.ftid_tab[fidx];
1121         }
1122
1123         return 0;
1124 }
1125
1126 static struct rte_flow *
1127 cxgbe_flow_create(struct rte_eth_dev *dev,
1128                   const struct rte_flow_attr *attr,
1129                   const struct rte_flow_item item[],
1130                   const struct rte_flow_action action[],
1131                   struct rte_flow_error *e)
1132 {
1133         struct adapter *adap = ethdev2adap(dev);
1134         struct rte_flow *flow;
1135         int ret;
1136
1137         flow = t4_os_alloc(sizeof(struct rte_flow));
1138         if (!flow) {
1139                 rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1140                                    NULL, "Unable to allocate memory for"
1141                                    " filter_entry");
1142                 return NULL;
1143         }
1144
1145         flow->item_parser = parseitem;
1146         flow->dev = dev;
1147         flow->fs.private = (void *)flow;
1148
1149         if (cxgbe_flow_parse(flow, attr, item, action, e)) {
1150                 t4_os_free(flow);
1151                 return NULL;
1152         }
1153
1154         t4_os_lock(&adap->flow_lock);
1155         /* go, interact with cxgbe_filter */
1156         ret = __cxgbe_flow_create(dev, flow);
1157         t4_os_unlock(&adap->flow_lock);
1158         if (ret) {
1159                 rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1160                                    NULL, "Unable to create flow rule");
1161                 t4_os_free(flow);
1162                 return NULL;
1163         }
1164
1165         flow->f->private = flow; /* Will be used during flush */
1166
1167         return flow;
1168 }
1169
1170 static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1171 {
1172         struct adapter *adap = ethdev2adap(dev);
1173         struct filter_entry *f = flow->f;
1174         struct ch_filter_specification *fs;
1175         struct filter_ctx ctx;
1176         int err;
1177
1178         fs = &f->fs;
1179         if (cxgbe_verify_fidx(flow, flow->fidx, 1))
1180                 return -1;
1181
1182         t4_init_completion(&ctx.completion);
1183         err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
1184         if (err) {
1185                 dev_err(adap, "Error %d while deleting filter.\n", err);
1186                 return err;
1187         }
1188
1189         /* Poll the FW for reply */
1190         err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1191                                         CXGBE_FLOW_POLL_MS,
1192                                         CXGBE_FLOW_POLL_CNT,
1193                                         &ctx.completion);
1194         if (err) {
1195                 dev_err(adap, "Filter delete operation timed out (%d)\n", err);
1196                 return err;
1197         }
1198         if (ctx.result) {
1199                 dev_err(adap, "Hardware error %d while deleting the filter.\n",
1200                         ctx.result);
1201                 return ctx.result;
1202         }
1203
1204         return 0;
1205 }
1206
1207 static int
1208 cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1209                    struct rte_flow_error *e)
1210 {
1211         struct adapter *adap = ethdev2adap(dev);
1212         int ret;
1213
1214         t4_os_lock(&adap->flow_lock);
1215         ret = __cxgbe_flow_destroy(dev, flow);
1216         t4_os_unlock(&adap->flow_lock);
1217         if (ret)
1218                 return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1219                                           flow, "error destroying filter.");
1220         t4_os_free(flow);
1221         return 0;
1222 }
1223
1224 static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
1225                               u64 *byte_count)
1226 {
1227         struct adapter *adap = ethdev2adap(flow->dev);
1228         struct ch_filter_specification fs = flow->f->fs;
1229         unsigned int fidx = flow->fidx;
1230         int ret = 0;
1231
1232         ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
1233         if (ret)
1234                 return ret;
1235         return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
1236 }
1237
1238 static int
1239 cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1240                  const struct rte_flow_action *action, void *data,
1241                  struct rte_flow_error *e)
1242 {
1243         struct adapter *adap = ethdev2adap(flow->dev);
1244         struct ch_filter_specification fs;
1245         struct rte_flow_query_count *c;
1246         struct filter_entry *f;
1247         int ret;
1248
1249         RTE_SET_USED(dev);
1250
1251         f = flow->f;
1252         fs = f->fs;
1253
1254         if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1255                 return rte_flow_error_set(e, ENOTSUP,
1256                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1257                                           "only count supported for query");
1258
1259         /*
1260          * This is a valid operation, since we are allowed to do Chelsio
1261          * specific operations on the rte side of our code, but not vice versa.
1262          *
1263          * So, fs can be queried/modified here, BUT rte_flow_query_count
1264          * cannot be touched by the lower layer, since we want to keep it
1265          * rte_flow agnostic.
1266          */
1267         if (!fs.hitcnts)
1268                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1269                                           &fs, "filter hit counters were not"
1270                                           " enabled during filter creation");
1271
1272         c = (struct rte_flow_query_count *)data;
1273
1274         t4_os_lock(&adap->flow_lock);
1275         ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
1276         if (ret) {
1277                 rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
1278                                    f, "cxgbe pmd failed to perform query");
1279                 goto out;
1280         }
1281
1282         /* Query was successful */
1283         c->bytes_set = 1;
1284         c->hits_set = 1;
1285         if (c->reset)
1286                 cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
1287
1288 out:
1289         t4_os_unlock(&adap->flow_lock);
1290         return ret;
1291 }
1292
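/* Validate a flow rule without programming the hardware: parse the
 * request, check it against the firmware filter configuration and make
 * sure a filter slot could be reserved, then release the temporary
 * state.  Per the commit title above, validation should no longer
 * allocate MPS entries of its own; those are reserved only when the
 * flow is actually created.
 */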
1293 static int
1294 cxgbe_flow_validate(struct rte_eth_dev *dev,
1295                     const struct rte_flow_attr *attr,
1296                     const struct rte_flow_item item[],
1297                     const struct rte_flow_action action[],
1298                     struct rte_flow_error *e)
1299 {
1300         struct adapter *adap = ethdev2adap(dev);
1301         struct rte_flow *flow;
1302         unsigned int fidx;
1303         int ret = 0;
1304
1305         flow = t4_os_alloc(sizeof(struct rte_flow));
1306         if (!flow)
1307                 return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1308                                 NULL,
1309                                 "Unable to allocate memory for filter_entry");
1310
1311         flow->item_parser = parseitem;
1312         flow->dev = dev;
1313
1314         ret = cxgbe_flow_parse(flow, attr, item, action, e);
1315         if (ret) {
1316                 t4_os_free(flow);
1317                 return ret;
1318         }
1319
1320         if (cxgbe_validate_filter(adap, &flow->fs)) {
1321                 t4_os_free(flow);
1322                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1323                                 NULL,
1324                                 "validation failed. Check f/w config file.");
1325         }
1326
1327         t4_os_lock(&adap->flow_lock);
1328         if (cxgbe_get_fidx(flow, &fidx)) {
1329                 ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1330                                          NULL, "no memory in tcam.");
1331                 goto out;
1332         }
1333
1334         if (cxgbe_verify_fidx(flow, fidx, 0)) {
1335                 ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1336                                          NULL, "validation failed");
1337                 goto out;
1338         }
1339
1340 out:
1341         t4_os_unlock(&adap->flow_lock);
1342         t4_os_free(flow);
1343         return ret;
1344 }
1345
1346 /*
1347  * @ret : > 0 filter destroyed successfully
1348  *        < 0 error destroying filter
1349  *        == 1 filter not active / not found
1350  */
1351 static int
1352 cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
1353 {
1354         if (f && (f->valid || f->pending) &&
1355             f->dev == dev && /* Only if user has asked for this port */
1356              f->private) /* We (rte_flow) created this filter */
1357                 return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
1358         return 1;
1359 }
1360
1361 static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
1362 {
1363         struct adapter *adap = ethdev2adap(dev);
1364         unsigned int i;
1365         int ret = 0;
1366
1367         t4_os_lock(&adap->flow_lock);
1368         if (adap->tids.ftid_tab) {
1369                 struct filter_entry *f = &adap->tids.ftid_tab[0];
1370
1371                 for (i = 0; i < adap->tids.nftids; i++, f++) {
1372                         ret = cxgbe_check_n_destroy(f, dev);
1373                         if (ret < 0) {
1374                                 rte_flow_error_set(e, ret,
1375                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1376                                                    f->private,
1377                                                    "error destroying TCAM "
1378                                                    "filter.");
1379                                 goto out;
1380                         }
1381                 }
1382         }
1383
1384         if (is_hashfilter(adap) && adap->tids.tid_tab) {
1385                 struct filter_entry *f;
1386
1387                 for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
1388                         f = (struct filter_entry *)adap->tids.tid_tab[i];
1389
1390                         ret = cxgbe_check_n_destroy(f, dev);
1391                         if (ret < 0) {
1392                                 rte_flow_error_set(e, ret,
1393                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1394                                                    f->private,
1395                                                    "error destroying HASH "
1396                                                    "filter.");
1397                                 goto out;
1398                         }
1399                 }
1400         }
1401
1402 out:
1403         t4_os_unlock(&adap->flow_lock);
1404         return ret >= 0 ? 0 : ret;
1405 }
1406
1407 static const struct rte_flow_ops cxgbe_flow_ops = {
1408         .validate       = cxgbe_flow_validate,
1409         .create         = cxgbe_flow_create,
1410         .destroy        = cxgbe_flow_destroy,
1411         .flush          = cxgbe_flow_flush,
1412         .query          = cxgbe_flow_query,
1413         .isolate        = NULL,
1414 };
1415
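/* Entry point through which the generic ethdev filter API hands out the
 * rte_flow ops above.  As an illustrative example (not taken from this
 * file), a rule this parser accepts could be created from testpmd with:
 *
 *   flow create 0 ingress pattern eth / ipv4 dst is 10.0.0.1 /
 *        tcp dst is 80 / end actions queue index 2 / count / end
 */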
1416 int
1417 cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
1418                       enum rte_filter_type filter_type,
1419                       enum rte_filter_op filter_op,
1420                       void *arg)
1421 {
1422         int ret = 0;
1423
1424         RTE_SET_USED(dev);
1425         switch (filter_type) {
1426         case RTE_ETH_FILTER_GENERIC:
1427                 if (filter_op != RTE_ETH_FILTER_GET)
1428                         return -EINVAL;
1429                 *(const void **)arg = &cxgbe_flow_ops;
1430                 break;
1431         default:
1432                 ret = -ENOTSUP;
1433                 break;
1434         }
1435         return ret;
1436 }