net/cxgbe: support flow API for matching IP TOS
drivers/net/cxgbe/cxgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include "base/common.h"
6 #include "cxgbe_flow.h"
7
8 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
9 do { \
10         if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
11                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
12                                           NULL, "Redefined match item with" \
13                                           " different values found"); \
14         (fs)->val.elem = (__v); \
15         (fs)->mask.elem = (__m); \
16 } while (0)
17
18 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
19 do { \
20         memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
21         memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
22 } while (0)
23
24 #define CXGBE_FILL_FS(v, m, elem) \
25         __CXGBE_FILL_FS(v, m, fs, elem, e)
26
27 #define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
28         __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
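/*
 * Usage note: CXGBE_FILL_FS(v, m, elem) records value 'v' and mask 'm' into
 * fs->val.elem and fs->mask.elem, failing the parse with EINVAL if an earlier
 * pattern item already filled the same field with a different value.
 * CXGBE_FILL_FS_MEMCPY() does the same for multi-byte fields (IP addresses)
 * without the redefinition check.
 */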
29
30 static int
31 cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
32 {
33         /* rte_flow specification does not allow it. */
34         if (!i->spec && (i->mask || i->last))
35                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
36                                    i, "last or mask given without spec");
37         /*
38          * We don't support the 'last' field.
39          * Although we could accept 'last' values that are all 0's or equal
40          * to spec, doing so would give the user no additional functionality
41          * and would only add complexity for us.
42          */
43         if (i->last)
44                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
45                                    i, "last is not supported by chelsio pmd");
46         return 0;
47 }
48
49 /**
50  * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
51  * there are only 40 bits available to store match fields.
52  * So, to save space, optimize filter spec for some common
53  * known fields that hardware can parse against incoming
54  * packets automatically.
55  */
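/*
 * For example, a rule that matches ethertype 0x0800 exactly on hardware whose
 * filter tuple has no ethertype field (ethertype_shift < 0) has the ethertype
 * match dropped and fs->type set to FILTER_TYPE_IPV4 instead, since hardware
 * can infer IPv4 on its own.
 */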
56 static void
57 cxgbe_tweak_filter_spec(struct adapter *adap,
58                         struct ch_filter_specification *fs)
59 {
60         /* Save 16-bit ethertype field space, by setting corresponding
61          * 1-bit flags in the filter spec for common known ethertypes.
62          * When hardware sees these flags, it automatically infers and
63          * matches incoming packets against the corresponding ethertype.
64          */
65         if (fs->mask.ethtype == 0xffff) {
66                 switch (fs->val.ethtype) {
67                 case RTE_ETHER_TYPE_IPV4:
68                         if (adap->params.tp.ethertype_shift < 0) {
69                                 fs->type = FILTER_TYPE_IPV4;
70                                 fs->val.ethtype = 0;
71                                 fs->mask.ethtype = 0;
72                         }
73                         break;
74                 case RTE_ETHER_TYPE_IPV6:
75                         if (adap->params.tp.ethertype_shift < 0) {
76                                 fs->type = FILTER_TYPE_IPV6;
77                                 fs->val.ethtype = 0;
78                                 fs->mask.ethtype = 0;
79                         }
80                         break;
81                 case RTE_ETHER_TYPE_VLAN:
82                         if (adap->params.tp.ethertype_shift < 0 &&
83                             adap->params.tp.vlan_shift >= 0) {
84                                 fs->val.ivlan_vld = 1;
85                                 fs->mask.ivlan_vld = 1;
86                                 fs->val.ethtype = 0;
87                                 fs->mask.ethtype = 0;
88                         }
89                         break;
90                 case RTE_ETHER_TYPE_QINQ:
91                         if (adap->params.tp.ethertype_shift < 0 &&
92                             adap->params.tp.vnic_shift >= 0) {
93                                 fs->val.ovlan_vld = 1;
94                                 fs->mask.ovlan_vld = 1;
95                                 fs->val.ethtype = 0;
96                                 fs->mask.ethtype = 0;
97                         }
98                         break;
99                 default:
100                         break;
101                 }
102         }
103 }
104
105 static void
106 cxgbe_fill_filter_region(struct adapter *adap,
107                          struct ch_filter_specification *fs)
108 {
109         struct tp_params *tp = &adap->params.tp;
110         u64 hash_filter_mask = tp->hash_filter_mask;
111         u64 ntuple_mask = 0;
112
113         fs->cap = 0;
114
115         if (!is_hashfilter(adap))
116                 return;
117
118         if (fs->type) {
119                 uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
120                                      0xff, 0xff, 0xff, 0xff,
121                                      0xff, 0xff, 0xff, 0xff,
122                                      0xff, 0xff, 0xff, 0xff};
123                 uint8_t bitoff[16] = {0};
124
125                 if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
126                     !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
127                     memcmp(fs->mask.lip, biton, sizeof(biton)) ||
128                     memcmp(fs->mask.fip, biton, sizeof(biton)))
129                         return;
130         } else {
131                 uint32_t biton  = 0xffffffff;
132                 uint32_t bitoff = 0x0U;
133
134                 if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
135                     !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
136                     memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
137                     memcmp(fs->mask.fip, &biton, sizeof(biton)))
138                         return;
139         }
140
141         if (!fs->val.lport || fs->mask.lport != 0xffff)
142                 return;
143         if (!fs->val.fport || fs->mask.fport != 0xffff)
144                 return;
145
146         if (tp->protocol_shift >= 0)
147                 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
148         if (tp->ethertype_shift >= 0)
149                 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
150         if (tp->port_shift >= 0)
151                 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
152         if (tp->macmatch_shift >= 0)
153                 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
154         if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
155                 ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
156                                tp->vlan_shift;
157         if (tp->vnic_shift >= 0 && fs->mask.ovlan_vld)
158                 ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ovlan) <<
159                                tp->vnic_shift;
160         if (tp->tos_shift >= 0)
161                 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
162
163         if (ntuple_mask != hash_filter_mask)
164                 return;
165
166         fs->cap = 1;    /* use hash region */
167 }
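/*
 * Illustration (testpmd-style syntax, for reference only): a rule such as
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 1.1.1.1 dst is 2.2.2.2
 *     tos is 32 / tcp src is 1000 dst is 2000 / end actions queue index 1 / end
 *
 * supplies an exact-match 4-tuple and so is a candidate for the hash region,
 * provided the resulting ntuple mask lines up with the configured
 * hash_filter_mask. Rules with partial IP/port masks stay in the TCAM region
 * (fs->cap == 0).
 */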
168
169 static int
170 ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
171                      struct ch_filter_specification *fs,
172                      struct rte_flow_error *e)
173 {
174         const struct rte_flow_item_eth *spec = item->spec;
175         const struct rte_flow_item_eth *umask = item->mask;
176         const struct rte_flow_item_eth *mask;
177
178         /* If user has not given any mask, then use chelsio supported mask. */
179         mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
180
181         if (!spec)
182                 return 0;
183
184         /* we don't support SRC_MAC filtering */
185         if (!rte_is_zero_ether_addr(&mask->src))
186                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
187                                           item,
188                                           "src mac filtering not supported");
189
190         if (!rte_is_zero_ether_addr(&mask->dst)) {
191                 const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
192                 const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
193                 struct rte_flow *flow = (struct rte_flow *)fs->private;
194                 struct port_info *pi = (struct port_info *)
195                                         (flow->dev->data->dev_private);
196                 int idx;
197
198                 idx = cxgbe_mpstcam_alloc(pi, addr, m);
199                 if (idx <= 0)
200                         return rte_flow_error_set(e, idx,
201                                                   RTE_FLOW_ERROR_TYPE_ITEM,
202                                                   NULL, "unable to allocate mac"
203                                                   " entry in h/w");
204                 CXGBE_FILL_FS(idx, 0x1ff, macidx);
205         }
206
207         CXGBE_FILL_FS(be16_to_cpu(spec->type),
208                       be16_to_cpu(mask->type), ethtype);
209
210         return 0;
211 }
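/*
 * Note: matching on a destination MAC consumes an MPS TCAM entry via
 * cxgbe_mpstcam_alloc(); the filter then matches on the returned index
 * (macidx, 9-bit mask 0x1ff) rather than carrying the MAC address itself.
 */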
212
213 static int
214 ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
215                       struct ch_filter_specification *fs,
216                       struct rte_flow_error *e)
217 {
218         const struct rte_flow_item_phy_port *val = item->spec;
219         const struct rte_flow_item_phy_port *umask = item->mask;
220         const struct rte_flow_item_phy_port *mask;
221
222         mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
223
224         if (val->index > 0x7)
225                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
226                                           item,
227                                           "port index up to 0x7 is supported");
228
229         CXGBE_FILL_FS(val->index, mask->index, iport);
230
231         return 0;
232 }
233
234 static int
235 ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
236                       struct ch_filter_specification *fs,
237                       struct rte_flow_error *e)
238 {
239         const struct rte_flow_item_vlan *spec = item->spec;
240         const struct rte_flow_item_vlan *umask = item->mask;
241         const struct rte_flow_item_vlan *mask;
242
243         /* If user has not given any mask, then use chelsio supported mask. */
244         mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
245
246         if (!fs->mask.ethtype)
247                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
248                                           item,
249                                           "Can't parse VLAN item without knowing ethertype");
250
251         /* If ethertype is already set and is not VLAN (0x8100) or
252          * QINQ(0x88A8), then don't proceed further. Otherwise,
253          * reset the outer ethertype, so that it can be replaced by
254          * innermost ethertype. Note that hardware will automatically
255          * match against VLAN or QINQ packets, based on 'ivlan_vld' or
256          * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
257          */
258         if (fs->mask.ethtype) {
259                 if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
260                     fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
261                         return rte_flow_error_set(e, EINVAL,
262                                                   RTE_FLOW_ERROR_TYPE_ITEM,
263                                                   item,
264                                                   "Ethertype must be 0x8100 or 0x88a8");
265         }
266
267         if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
268                 CXGBE_FILL_FS(1, 1, ovlan_vld);
269                 if (spec) {
270                         CXGBE_FILL_FS(be16_to_cpu(spec->tci),
271                                       be16_to_cpu(mask->tci), ovlan);
272
273                         fs->mask.ethtype = 0;
274                         fs->val.ethtype = 0;
275                 }
276         } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
277                 CXGBE_FILL_FS(1, 1, ivlan_vld);
278                 if (spec) {
279                         CXGBE_FILL_FS(be16_to_cpu(spec->tci),
280                                       be16_to_cpu(mask->tci), ivlan);
281
282                         fs->mask.ethtype = 0;
283                         fs->val.ethtype = 0;
284                 }
285         }
286
287         if (spec)
288                 CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
289                               be16_to_cpu(mask->inner_type), ethtype);
290
291         return 0;
292 }
293
294 static int
295 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
296                      struct ch_filter_specification *fs,
297                      struct rte_flow_error *e)
298 {
299         const struct rte_flow_item_udp *val = item->spec;
300         const struct rte_flow_item_udp *umask = item->mask;
301         const struct rte_flow_item_udp *mask;
302
303         mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
304
305         if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
306                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
307                                           item,
308                                           "udp: only src/dst port supported");
309
310         CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
311         if (!val)
312                 return 0;
313         CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
314                       be16_to_cpu(mask->hdr.src_port), fport);
315         CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
316                       be16_to_cpu(mask->hdr.dst_port), lport);
317         return 0;
318 }
319
320 static int
321 ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
322                      struct ch_filter_specification *fs,
323                      struct rte_flow_error *e)
324 {
325         const struct rte_flow_item_tcp *val = item->spec;
326         const struct rte_flow_item_tcp *umask = item->mask;
327         const struct rte_flow_item_tcp *mask;
328
329         mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
330
331         if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
332             mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
333             mask->hdr.tcp_urp)
334                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
335                                           item,
336                                           "tcp: only src/dst port supported");
337
338         CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
339         if (!val)
340                 return 0;
341         CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
342                       be16_to_cpu(mask->hdr.src_port), fport);
343         CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
344                       be16_to_cpu(mask->hdr.dst_port), lport);
345         return 0;
346 }
347
348 static int
349 ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
350                       struct ch_filter_specification *fs,
351                       struct rte_flow_error *e)
352 {
353         const struct rte_flow_item_ipv4 *val = item->spec;
354         const struct rte_flow_item_ipv4 *umask = item->mask;
355         const struct rte_flow_item_ipv4 *mask;
356
357         mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
358
359         if (mask->hdr.time_to_live)
360                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
361                                           item, "ttl is not supported");
362
363         if (fs->mask.ethtype &&
364             (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
365                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
366                                           item,
367                                           "Couldn't find IPv4 ethertype");
368         fs->type = FILTER_TYPE_IPV4;
369         if (!val)
370                 return 0; /* ipv4 wild card */
371
372         CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
373         CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
374         CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
375         CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);
376
377         return 0;
378 }
379
380 static int
381 ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
382                       struct ch_filter_specification *fs,
383                       struct rte_flow_error *e)
384 {
385         const struct rte_flow_item_ipv6 *val = item->spec;
386         const struct rte_flow_item_ipv6 *umask = item->mask;
387         const struct rte_flow_item_ipv6 *mask;
388         u32 vtc_flow, vtc_flow_mask;
389
390         mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
391
392         vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
393
394         if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
395             mask->hdr.payload_len || mask->hdr.hop_limits)
396                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
397                                           item,
398                                           "flow/hop are not supported");
399
400         if (fs->mask.ethtype &&
401             (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
402                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
403                                           item,
404                                           "Couldn't find IPv6 ethertype");
405         fs->type = FILTER_TYPE_IPV6;
406         if (!val)
407                 return 0; /* ipv6 wild card */
408
409         CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
410
411         vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
412         CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
413                       RTE_IPV6_HDR_TC_SHIFT,
414                       (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
415                       RTE_IPV6_HDR_TC_SHIFT,
416                       tos);
417
418         CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
419         CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
420
421         return 0;
422 }
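/*
 * The IPv6 Traffic Class occupies the upper bits of vtc_flow; it is extracted
 * with RTE_IPV6_HDR_TC_MASK/RTE_IPV6_HDR_TC_SHIFT and stored in the same 'tos'
 * filter field that carries the IPv4 ToS byte, so both address families share
 * one hardware match field.
 */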
423
424 static int
425 cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
426                       struct rte_flow_error *e)
427 {
428         if (attr->egress)
429                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
430                                           attr, "egress attribute is"
431                                           " not supported");
432         if (attr->group > 0)
433                 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
434                                           attr, "group parameter is"
435                                           " not supported.");
436
437         flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
438
439         return 0;
440 }
441
442 static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
443 {
444         struct port_info *pi = ethdev2pinfo(dev);
445
446         if (rxq > pi->n_rx_qsets)
447                 return -EINVAL;
448         return 0;
449 }
450
451 static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
452 {
453         struct adapter *adap = ethdev2adap(f->dev);
454         struct ch_filter_specification fs = f->fs;
455         u8 nentries;
456
457         if (fidx >= adap->tids.nftids) {
458                 dev_err(adap, "invalid flow index %d.\n", fidx);
459                 return -EINVAL;
460         }
461
462         nentries = cxgbe_filter_slots(adap, fs.type);
463         if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
464                 dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
465                 return -EINVAL;
466         }
467
468         return 0;
469 }
470
471 static int
472 cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
473                          struct adapter *adap, unsigned int fidx)
474 {
475         u8 nentries;
476
477         nentries = cxgbe_filter_slots(adap, fs->type);
478         if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
479                 dev_err(adap, "filter index: %d is busy.\n", fidx);
480                 return -EBUSY;
481         }
482
483         if (fidx >= adap->tids.nftids) {
484                 dev_err(adap, "filter index (%u) >= max(%u)\n",
485                         fidx, adap->tids.nftids);
486                 return -ERANGE;
487         }
488
489         return 0;
490 }
491
492 static int
493 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
494 {
495         if (flow->fs.cap)
496                 return 0; /* Hash filters */
497         return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
498                 cxgbe_validate_fidxonadd(&flow->fs,
499                                          ethdev2adap(flow->dev), fidx);
500 }
501
502 static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
503 {
504         struct ch_filter_specification *fs = &flow->fs;
505         struct adapter *adap = ethdev2adap(flow->dev);
506
507         /* For TCAM, get the next available slot if the default index was specified */
508         if (flow->fidx == FILTER_ID_MAX) {
509                 u8 nentries;
510                 int idx;
511
512                 nentries = cxgbe_filter_slots(adap, fs->type);
513                 idx = cxgbe_alloc_ftid(adap, nentries);
514                 if (idx < 0) {
515                         dev_err(adap, "unable to get a filter index in tcam\n");
516                         return -ENOMEM;
517                 }
518                 *fidx = (unsigned int)idx;
519         } else {
520                 *fidx = flow->fidx;
521         }
522
523         return 0;
524 }
525
526 static int
527 cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
528 {
529         const struct rte_flow_item *i;
530         int j, index = -ENOENT;
531
532         for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
533                 if (i->type == type) {
534                         index = j;
535                         break;
536                 }
537         }
538
539         return index;
540 }
541
542 static int
543 ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
544 {
545         /* nmode:
546          * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
547          * BIT_2 = [src_port], BIT_3 = [dst_port]
548          *
549          * Only below cases are supported as per our spec.
550          */
551         switch (nmode) {
552         case 0:  /* 0000b */
553                 fs->nat_mode = NAT_MODE_NONE;
554                 break;
555         case 2:  /* 0010b */
556                 fs->nat_mode = NAT_MODE_DIP;
557                 break;
558         case 5:  /* 0101b */
559                 fs->nat_mode = NAT_MODE_SIP_SP;
560                 break;
561         case 7:  /* 0111b */
562                 fs->nat_mode = NAT_MODE_DIP_SIP_SP;
563                 break;
564         case 10: /* 1010b */
565                 fs->nat_mode = NAT_MODE_DIP_DP;
566                 break;
567         case 11: /* 1011b */
568                 fs->nat_mode = NAT_MODE_DIP_DP_SIP;
569                 break;
570         case 14: /* 1110b */
571                 fs->nat_mode = NAT_MODE_DIP_DP_SP;
572                 break;
573         case 15: /* 1111b */
574                 fs->nat_mode = NAT_MODE_ALL;
575                 break;
576         default:
577                 return -EINVAL;
578         }
579
580         return 0;
581 }
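/*
 * Example: a switch rule whose actions include SET_IPV4_DST and SET_TP_DST
 * sets BIT_1 and BIT_3 above, i.e. nmode = 1010b, which maps to
 * NAT_MODE_DIP_DP (rewrite destination IP and destination port).
 */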
582
583 static int
584 ch_rte_parse_atype_switch(const struct rte_flow_action *a,
585                           const struct rte_flow_item items[],
586                           uint8_t *nmode,
587                           struct ch_filter_specification *fs,
588                           struct rte_flow_error *e)
589 {
590         const struct rte_flow_action_of_set_vlan_vid *vlanid;
591         const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
592         const struct rte_flow_action_of_push_vlan *pushvlan;
593         const struct rte_flow_action_set_ipv4 *ipv4;
594         const struct rte_flow_action_set_ipv6 *ipv6;
595         const struct rte_flow_action_set_tp *tp_port;
596         const struct rte_flow_action_phy_port *port;
597         int item_index;
598         u16 tmp_vlan;
599
600         switch (a->type) {
601         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
602                 vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
603                           a->conf;
604                 /* If explicitly asked to push a new VLAN header,
605                  * then don't set rewrite mode. Otherwise, the
606                  * incoming VLAN packets will get their VLAN fields
607                  * rewritten, instead of adding an additional outer
608                  * VLAN header.
609                  */
610                 if (fs->newvlan != VLAN_INSERT)
611                         fs->newvlan = VLAN_REWRITE;
612                 tmp_vlan = fs->vlan & 0xe000;
613                 fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
614                 break;
615         case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
616                 vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
617                           a->conf;
618                 /* If explicitly asked to push a new VLAN header,
619                  * then don't set rewrite mode. Otherwise, the
620                  * incoming VLAN packets will get their VLAN fields
621                  * rewritten, instead of adding an additional outer
622                  * VLAN header.
623                  */
624                 if (fs->newvlan != VLAN_INSERT)
625                         fs->newvlan = VLAN_REWRITE;
626                 tmp_vlan = fs->vlan & 0xfff;
627                 fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
628                 break;
629         case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
630                 pushvlan = (const struct rte_flow_action_of_push_vlan *)
631                             a->conf;
632                 if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
633                         return rte_flow_error_set(e, EINVAL,
634                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
635                                                   "only ethertype 0x8100 "
636                                                   "supported for push vlan.");
637                 fs->newvlan = VLAN_INSERT;
638                 break;
639         case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
640                 fs->newvlan = VLAN_REMOVE;
641                 break;
642         case RTE_FLOW_ACTION_TYPE_PHY_PORT:
643                 port = (const struct rte_flow_action_phy_port *)a->conf;
644                 fs->eport = port->index;
645                 break;
646         case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
647                 item_index = cxgbe_get_flow_item_index(items,
648                                                        RTE_FLOW_ITEM_TYPE_IPV4);
649                 if (item_index < 0)
650                         return rte_flow_error_set(e, EINVAL,
651                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
652                                                   "No RTE_FLOW_ITEM_TYPE_IPV4 "
653                                                   "found.");
654
655                 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
656                 memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
657                 *nmode |= 1 << 0;
658                 break;
659         case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
660                 item_index = cxgbe_get_flow_item_index(items,
661                                                        RTE_FLOW_ITEM_TYPE_IPV4);
662                 if (item_index < 0)
663                         return rte_flow_error_set(e, EINVAL,
664                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
665                                                   "No RTE_FLOW_ITEM_TYPE_IPV4 "
666                                                   "found.");
667
668                 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
669                 memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
670                 *nmode |= 1 << 1;
671                 break;
672         case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
673                 item_index = cxgbe_get_flow_item_index(items,
674                                                        RTE_FLOW_ITEM_TYPE_IPV6);
675                 if (item_index < 0)
676                         return rte_flow_error_set(e, EINVAL,
677                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
678                                                   "No RTE_FLOW_ITEM_TYPE_IPV6 "
679                                                   "found.");
680
681                 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
682                 memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
683                 *nmode |= 1 << 0;
684                 break;
685         case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
686                 item_index = cxgbe_get_flow_item_index(items,
687                                                        RTE_FLOW_ITEM_TYPE_IPV6);
688                 if (item_index < 0)
689                         return rte_flow_error_set(e, EINVAL,
690                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
691                                                   "No RTE_FLOW_ITEM_TYPE_IPV6 "
692                                                   "found.");
693
694                 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
695                 memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
696                 *nmode |= 1 << 1;
697                 break;
698         case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
699                 item_index = cxgbe_get_flow_item_index(items,
700                                                        RTE_FLOW_ITEM_TYPE_TCP);
701                 if (item_index < 0) {
702                         item_index =
703                                 cxgbe_get_flow_item_index(items,
704                                                 RTE_FLOW_ITEM_TYPE_UDP);
705                         if (item_index < 0)
706                                 return rte_flow_error_set(e, EINVAL,
707                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
708                                                 "No RTE_FLOW_ITEM_TYPE_TCP or "
709                                                 "RTE_FLOW_ITEM_TYPE_UDP found");
710                 }
711
712                 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
713                 fs->nat_fport = be16_to_cpu(tp_port->port);
714                 *nmode |= 1 << 2;
715                 break;
716         case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
717                 item_index = cxgbe_get_flow_item_index(items,
718                                                        RTE_FLOW_ITEM_TYPE_TCP);
719                 if (item_index < 0) {
720                         item_index =
721                                 cxgbe_get_flow_item_index(items,
722                                                 RTE_FLOW_ITEM_TYPE_UDP);
723                         if (item_index < 0)
724                                 return rte_flow_error_set(e, EINVAL,
725                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
726                                                 "No RTE_FLOW_ITEM_TYPE_TCP or "
727                                                 "RTE_FLOW_ITEM_TYPE_UDP found");
728                 }
729
730                 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
731                 fs->nat_lport = be16_to_cpu(tp_port->port);
732                 *nmode |= 1 << 3;
733                 break;
734         case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
735                 item_index = cxgbe_get_flow_item_index(items,
736                                                        RTE_FLOW_ITEM_TYPE_ETH);
737                 if (item_index < 0)
738                         return rte_flow_error_set(e, EINVAL,
739                                                   RTE_FLOW_ERROR_TYPE_ACTION, a,
740                                                   "No RTE_FLOW_ITEM_TYPE_ETH "
741                                                   "found");
742                 fs->swapmac = 1;
743                 break;
744         default:
745                 /* We are not supposed to come here */
746                 return rte_flow_error_set(e, EINVAL,
747                                           RTE_FLOW_ERROR_TYPE_ACTION, a,
748                                           "Action not supported");
749         }
750
751         return 0;
752 }
753
754 static int
755 cxgbe_rtef_parse_actions(struct rte_flow *flow,
756                          const struct rte_flow_item items[],
757                          const struct rte_flow_action action[],
758                          struct rte_flow_error *e)
759 {
760         struct ch_filter_specification *fs = &flow->fs;
761         uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
762         uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
763         const struct rte_flow_action_queue *q;
764         const struct rte_flow_action *a;
765         char abit = 0;
766         int ret;
767
768         for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
769                 switch (a->type) {
770                 case RTE_FLOW_ACTION_TYPE_VOID:
771                         continue;
772                 case RTE_FLOW_ACTION_TYPE_DROP:
773                         if (abit++)
774                                 return rte_flow_error_set(e, EINVAL,
775                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
776                                                 "specify only 1 pass/drop");
777                         fs->action = FILTER_DROP;
778                         break;
779                 case RTE_FLOW_ACTION_TYPE_QUEUE:
780                         q = (const struct rte_flow_action_queue *)a->conf;
781                         if (!q)
782                                 return rte_flow_error_set(e, EINVAL,
783                                                 RTE_FLOW_ERROR_TYPE_ACTION, q,
784                                                 "specify rx queue index");
785                         if (check_rxq(flow->dev, q->index))
786                                 return rte_flow_error_set(e, EINVAL,
787                                                 RTE_FLOW_ERROR_TYPE_ACTION, q,
788                                                 "Invalid rx queue");
789                         if (abit++)
790                                 return rte_flow_error_set(e, EINVAL,
791                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
792                                                 "specify only 1 pass/drop");
793                         fs->action = FILTER_PASS;
794                         fs->dirsteer = 1;
795                         fs->iq = q->index;
796                         break;
797                 case RTE_FLOW_ACTION_TYPE_COUNT:
798                         fs->hitcnts = 1;
799                         break;
800                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
801                         vlan_set_vid++;
802                         goto action_switch;
803                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
804                         vlan_set_pcp++;
805                         goto action_switch;
806                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
807                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
808                 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
809                 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
810                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
811                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
812                         nat_ipv4++;
813                         goto action_switch;
814                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
815                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
816                         nat_ipv6++;
817                         goto action_switch;
818                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
819                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
820 action_switch:
821                         /* We allow multiple switch actions, but switch is
822                          * not compatible with either queue or drop
823                          */
824                         if (abit++ && fs->action != FILTER_SWITCH)
825                                 return rte_flow_error_set(e, EINVAL,
826                                                 RTE_FLOW_ERROR_TYPE_ACTION, a,
827                                                 "overlapping action specified");
828                         if (nat_ipv4 && nat_ipv6)
829                                 return rte_flow_error_set(e, EINVAL,
830                                         RTE_FLOW_ERROR_TYPE_ACTION, a,
831                                         "Can't have one address ipv4 and the"
832                                         " other ipv6");
833
834                         ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
835                                                         e);
836                         if (ret)
837                                 return ret;
838                         fs->action = FILTER_SWITCH;
839                         break;
840                 default:
841                         /* Unsupported action: return error */
842                         return rte_flow_error_set(e, ENOTSUP,
843                                                   RTE_FLOW_ERROR_TYPE_ACTION,
844                                                   a, "Action not supported");
845                 }
846         }
847
848         if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
849                 return rte_flow_error_set(e, EINVAL,
850                                           RTE_FLOW_ERROR_TYPE_ACTION, a,
851                                           "Both OF_SET_VLAN_VID and "
852                                           "OF_SET_VLAN_PCP must be specified");
853
854         if (ch_rte_parse_nat(nmode, fs))
855                 return rte_flow_error_set(e, EINVAL,
856                                           RTE_FLOW_ERROR_TYPE_ACTION, a,
857                                           "invalid settings for switch action");
858         return 0;
859 }
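/*
 * Note: VLAN_REWRITE replaces the whole 16-bit TCI, so a rule that sets only
 * OF_SET_VLAN_VID without OF_SET_VLAN_PCP (or vice versa) is rejected above.
 * Switch (rewrite/steer) actions may be combined with each other, but not
 * with QUEUE or DROP.
 */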
860
861 static struct chrte_fparse parseitem[] = {
862         [RTE_FLOW_ITEM_TYPE_ETH] = {
863                 .fptr  = ch_rte_parsetype_eth,
864                 .dmask = &(const struct rte_flow_item_eth){
865                         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
866                         .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
867                         .type = 0xffff,
868                 }
869         },
870
871         [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
872                 .fptr = ch_rte_parsetype_port,
873                 .dmask = &(const struct rte_flow_item_phy_port){
874                         .index = 0x7,
875                 }
876         },
877
878         [RTE_FLOW_ITEM_TYPE_VLAN] = {
879                 .fptr = ch_rte_parsetype_vlan,
880                 .dmask = &(const struct rte_flow_item_vlan){
881                         .tci = 0xffff,
882                         .inner_type = 0xffff,
883                 }
884         },
885
886         [RTE_FLOW_ITEM_TYPE_IPV4] = {
887                 .fptr  = ch_rte_parsetype_ipv4,
888                 .dmask = &(const struct rte_flow_item_ipv4) {
889                         .hdr = {
890                                 .src_addr = RTE_BE32(0xffffffff),
891                                 .dst_addr = RTE_BE32(0xffffffff),
892                                 .type_of_service = 0xff,
893                         },
894                 },
895         },
896
897         [RTE_FLOW_ITEM_TYPE_IPV6] = {
898                 .fptr  = ch_rte_parsetype_ipv6,
899                 .dmask = &(const struct rte_flow_item_ipv6) {
900                         .hdr = {
901                                 .src_addr =
902                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
903                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
904                                 .dst_addr =
905                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
906                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
907                                 .vtc_flow = RTE_BE32(0xff000000),
908                         },
909                 },
910         },
911
912         [RTE_FLOW_ITEM_TYPE_UDP] = {
913                 .fptr  = ch_rte_parsetype_udp,
914                 .dmask = &rte_flow_item_udp_mask,
915         },
916
917         [RTE_FLOW_ITEM_TYPE_TCP] = {
918                 .fptr  = ch_rte_parsetype_tcp,
919                 .dmask = &rte_flow_item_tcp_mask,
920         },
921 };
922
923 static int
924 cxgbe_rtef_parse_items(struct rte_flow *flow,
925                        const struct rte_flow_item items[],
926                        struct rte_flow_error *e)
927 {
928         struct adapter *adap = ethdev2adap(flow->dev);
929         const struct rte_flow_item *i;
930         char repeat[ARRAY_SIZE(parseitem)] = {0};
931
932         for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
933                 struct chrte_fparse *idx;
934                 int ret;
935
936                 if (i->type >= ARRAY_SIZE(parseitem))
937                         return rte_flow_error_set(e, ENOTSUP,
938                                                   RTE_FLOW_ERROR_TYPE_ITEM,
939                                                   i, "Item not supported");
940
941                 switch (i->type) {
942                 case RTE_FLOW_ITEM_TYPE_VOID:
943                         continue;
944                 default:
945                         /* check if item is repeated */
946                         if (repeat[i->type] &&
947                             i->type != RTE_FLOW_ITEM_TYPE_VLAN)
948                                 return rte_flow_error_set(e, ENOTSUP,
949                                                 RTE_FLOW_ERROR_TYPE_ITEM, i,
950                                                 "parse items cannot be repeated (except void/vlan)");
951
952                         repeat[i->type] = 1;
953
954                         /* No spec found for this pattern item. Skip it */
955                         if (!i->spec)
956                                 break;
957
958                         /* validate the item */
959                         ret = cxgbe_validate_item(i, e);
960                         if (ret)
961                                 return ret;
962
963                         idx = &flow->item_parser[i->type];
964                         if (!idx || !idx->fptr) {
965                                 return rte_flow_error_set(e, ENOTSUP,
966                                                 RTE_FLOW_ERROR_TYPE_ITEM, i,
967                                                 "Item not supported");
968                         } else {
969                                 ret = idx->fptr(idx->dmask, i, &flow->fs, e);
970                                 if (ret)
971                                         return ret;
972                         }
973                 }
974         }
975
976         cxgbe_fill_filter_region(adap, &flow->fs);
977         cxgbe_tweak_filter_spec(adap, &flow->fs);
978
979         return 0;
980 }
981
982 static int
983 cxgbe_flow_parse(struct rte_flow *flow,
984                  const struct rte_flow_attr *attr,
985                  const struct rte_flow_item item[],
986                  const struct rte_flow_action action[],
987                  struct rte_flow_error *e)
988 {
989         int ret;
990         /* parse user request into ch_filter_specification */
991         ret = cxgbe_rtef_parse_attr(flow, attr, e);
992         if (ret)
993                 return ret;
994         ret = cxgbe_rtef_parse_items(flow, item, e);
995         if (ret)
996                 return ret;
997         return cxgbe_rtef_parse_actions(flow, item, action, e);
998 }
999
1000 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
1001 {
1002         struct ch_filter_specification *fs = &flow->fs;
1003         struct adapter *adap = ethdev2adap(dev);
1004         struct tid_info *t = &adap->tids;
1005         struct filter_ctx ctx;
1006         unsigned int fidx;
1007         int err;
1008
1009         if (cxgbe_get_fidx(flow, &fidx))
1010                 return -ENOMEM;
1011         if (cxgbe_verify_fidx(flow, fidx, 0))
1012                 return -1;
1013
1014         t4_init_completion(&ctx.completion);
1015         /* go create the filter */
1016         err = cxgbe_set_filter(dev, fidx, fs, &ctx);
1017         if (err) {
1018                 dev_err(adap, "Error %d while creating filter.\n", err);
1019                 return err;
1020         }
1021
1022         /* Poll the FW for reply */
1023         err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1024                                         CXGBE_FLOW_POLL_MS,
1025                                         CXGBE_FLOW_POLL_CNT,
1026                                         &ctx.completion);
1027         if (err) {
1028                 dev_err(adap, "Filter set operation timed out (%d)\n", err);
1029                 return err;
1030         }
1031         if (ctx.result) {
1032                 dev_err(adap, "Hardware error %d while creating the filter.\n",
1033                         ctx.result);
1034                 return ctx.result;
1035         }
1036
1037         if (fs->cap) { /* to destroy the filter */
1038                 flow->fidx = ctx.tid;
1039                 flow->f = lookup_tid(t, ctx.tid);
1040         } else {
1041                 flow->fidx = fidx;
1042                 flow->f = &adap->tids.ftid_tab[fidx];
1043         }
1044
1045         return 0;
1046 }
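/*
 * Note: for hash (exact-match) filters the hardware assigns a TID, which
 * becomes the flow's fidx and is used to look up the filter_entry; TCAM
 * filters keep the slot index chosen via cxgbe_get_fidx() and live in
 * ftid_tab.
 */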
1047
1048 static struct rte_flow *
1049 cxgbe_flow_create(struct rte_eth_dev *dev,
1050                   const struct rte_flow_attr *attr,
1051                   const struct rte_flow_item item[],
1052                   const struct rte_flow_action action[],
1053                   struct rte_flow_error *e)
1054 {
1055         struct adapter *adap = ethdev2adap(dev);
1056         struct rte_flow *flow;
1057         int ret;
1058
1059         flow = t4_os_alloc(sizeof(struct rte_flow));
1060         if (!flow) {
1061                 rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1062                                    NULL, "Unable to allocate memory for"
1063                                    " filter_entry");
1064                 return NULL;
1065         }
1066
1067         flow->item_parser = parseitem;
1068         flow->dev = dev;
1069         flow->fs.private = (void *)flow;
1070
1071         if (cxgbe_flow_parse(flow, attr, item, action, e)) {
1072                 t4_os_free(flow);
1073                 return NULL;
1074         }
1075
1076         t4_os_lock(&adap->flow_lock);
1077         /* go, interact with cxgbe_filter */
1078         ret = __cxgbe_flow_create(dev, flow);
1079         t4_os_unlock(&adap->flow_lock);
1080         if (ret) {
1081                 rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1082                                    NULL, "Unable to create flow rule");
1083                 t4_os_free(flow);
1084                 return NULL;
1085         }
1086
1087         flow->f->private = flow; /* Will be used during flush */
1088
1089         return flow;
1090 }
1091
1092 static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1093 {
1094         struct adapter *adap = ethdev2adap(dev);
1095         struct filter_entry *f = flow->f;
1096         struct ch_filter_specification *fs;
1097         struct filter_ctx ctx;
1098         int err;
1099
1100         fs = &f->fs;
1101         if (cxgbe_verify_fidx(flow, flow->fidx, 1))
1102                 return -1;
1103
1104         t4_init_completion(&ctx.completion);
1105         err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
1106         if (err) {
1107                 dev_err(adap, "Error %d while deleting filter.\n", err);
1108                 return err;
1109         }
1110
1111         /* Poll the FW for reply */
1112         err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1113                                         CXGBE_FLOW_POLL_MS,
1114                                         CXGBE_FLOW_POLL_CNT,
1115                                         &ctx.completion);
1116         if (err) {
1117                 dev_err(adap, "Filter delete operation timed out (%d)\n", err);
1118                 return err;
1119         }
1120         if (ctx.result) {
1121                 dev_err(adap, "Hardware error %d while deleting the filter.\n",
1122                         ctx.result);
1123                 return ctx.result;
1124         }
1125
1126         fs = &flow->fs;
1127         if (fs->mask.macidx) {
1128                 struct port_info *pi = (struct port_info *)
1129                                         (dev->data->dev_private);
1130                 int ret;
1131
1132                 ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
1133                 if (!ret)
1134                         return ret;
1135         }
1136
1137         return 0;
1138 }
1139
1140 static int
1141 cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1142                    struct rte_flow_error *e)
1143 {
1144         struct adapter *adap = ethdev2adap(dev);
1145         int ret;
1146
1147         t4_os_lock(&adap->flow_lock);
1148         ret = __cxgbe_flow_destroy(dev, flow);
1149         t4_os_unlock(&adap->flow_lock);
1150         if (ret)
1151                 return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1152                                           flow, "error destroying filter.");
1153         t4_os_free(flow);
1154         return 0;
1155 }
1156
1157 static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
1158                               u64 *byte_count)
1159 {
1160         struct adapter *adap = ethdev2adap(flow->dev);
1161         struct ch_filter_specification fs = flow->f->fs;
1162         unsigned int fidx = flow->fidx;
1163         int ret = 0;
1164
1165         ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
1166         if (ret)
1167                 return ret;
1168         return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
1169 }
1170
1171 static int
1172 cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1173                  const struct rte_flow_action *action, void *data,
1174                  struct rte_flow_error *e)
1175 {
1176         struct adapter *adap = ethdev2adap(flow->dev);
1177         struct ch_filter_specification fs;
1178         struct rte_flow_query_count *c;
1179         struct filter_entry *f;
1180         int ret;
1181
1182         RTE_SET_USED(dev);
1183
1184         f = flow->f;
1185         fs = f->fs;
1186
1187         if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1188                 return rte_flow_error_set(e, ENOTSUP,
1189                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1190                                           "only count supported for query");
1191
1192         /*
1193          * This is a valid operation: we are allowed to do Chelsio-specific
1194          * operations on the rte side of our code, but not vice versa.
1195          *
1196          * So, fs can be queried/modified here, BUT rte_flow_query_count
1197          * cannot be worked on by the lower layer, since we want to keep it
1198          * rte_flow agnostic.
1199          */
1200         if (!fs.hitcnts)
1201                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1202                                           &fs, "filter hit counters were not"
1203                                           " enabled during filter creation");
1204
1205         c = (struct rte_flow_query_count *)data;
1206
1207         t4_os_lock(&adap->flow_lock);
1208         ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
1209         if (ret) {
1210                 rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
1211                                    f, "cxgbe pmd failed to perform query");
1212                 goto out;
1213         }
1214
1215         /* Query was successful */
1216         c->bytes_set = 1;
1217         c->hits_set = 1;
1218         if (c->reset)
1219                 cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
1220
1221 out:
1222         t4_os_unlock(&adap->flow_lock);
1223         return ret;
1224 }
1225
1226 static int
1227 cxgbe_flow_validate(struct rte_eth_dev *dev,
1228                     const struct rte_flow_attr *attr,
1229                     const struct rte_flow_item item[],
1230                     const struct rte_flow_action action[],
1231                     struct rte_flow_error *e)
1232 {
1233         struct adapter *adap = ethdev2adap(dev);
1234         struct rte_flow *flow;
1235         unsigned int fidx;
1236         int ret = 0;
1237
1238         flow = t4_os_alloc(sizeof(struct rte_flow));
1239         if (!flow)
1240                 return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1241                                 NULL,
1242                                 "Unable to allocate memory for filter_entry");
1243
1244         flow->item_parser = parseitem;
1245         flow->dev = dev;
1246
1247         ret = cxgbe_flow_parse(flow, attr, item, action, e);
1248         if (ret) {
1249                 t4_os_free(flow);
1250                 return ret;
1251         }
1252
1253         if (cxgbe_validate_filter(adap, &flow->fs)) {
1254                 t4_os_free(flow);
1255                 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1256                                 NULL,
1257                                 "validation failed. Check f/w config file.");
1258         }
1259
1260         t4_os_lock(&adap->flow_lock);
1261         if (cxgbe_get_fidx(flow, &fidx)) {
1262                 ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1263                                          NULL, "no memory in tcam.");
1264                 goto out;
1265         }
1266
1267         if (cxgbe_verify_fidx(flow, fidx, 0)) {
1268                 ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1269                                          NULL, "validation failed");
1270                 goto out;
1271         }
1272
1273 out:
1274         t4_os_unlock(&adap->flow_lock);
1275         t4_os_free(flow);
1276         return ret;
1277 }
1278
1279 /*
1280  * @ret : == 0 filter destroyed successfully
1281  *        < 0 error destroying filter
1282  *        == 1 filter not active / not found
1283  */
1284 static int
1285 cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
1286 {
1287         if (f && (f->valid || f->pending) &&
1288             f->dev == dev && /* Only if user has asked for this port */
1289              f->private) /* We (rte_flow) created this filter */
1290                 return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
1291         return 1;
1292 }
1293
1294 static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
1295 {
1296         struct adapter *adap = ethdev2adap(dev);
1297         unsigned int i;
1298         int ret = 0;
1299
1300         t4_os_lock(&adap->flow_lock);
1301         if (adap->tids.ftid_tab) {
1302                 struct filter_entry *f = &adap->tids.ftid_tab[0];
1303
1304                 for (i = 0; i < adap->tids.nftids; i++, f++) {
1305                         ret = cxgbe_check_n_destroy(f, dev);
1306                         if (ret < 0) {
1307                                 rte_flow_error_set(e, ret,
1308                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1309                                                    f->private,
1310                                                    "error destroying TCAM "
1311                                                    "filter.");
1312                                 goto out;
1313                         }
1314                 }
1315         }
1316
1317         if (is_hashfilter(adap) && adap->tids.tid_tab) {
1318                 struct filter_entry *f;
1319
1320                 for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
1321                         f = (struct filter_entry *)adap->tids.tid_tab[i];
1322
1323                         ret = cxgbe_check_n_destroy(f, dev);
1324                         if (ret < 0) {
1325                                 rte_flow_error_set(e, ret,
1326                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1327                                                    f->private,
1328                                                    "error destroying HASH "
1329                                                    "filter.");
1330                                 goto out;
1331                         }
1332                 }
1333         }
1334
1335 out:
1336         t4_os_unlock(&adap->flow_lock);
1337         return ret >= 0 ? 0 : ret;
1338 }
1339
1340 static const struct rte_flow_ops cxgbe_flow_ops = {
1341         .validate       = cxgbe_flow_validate,
1342         .create         = cxgbe_flow_create,
1343         .destroy        = cxgbe_flow_destroy,
1344         .flush          = cxgbe_flow_flush,
1345         .query          = cxgbe_flow_query,
1346         .isolate        = NULL,
1347 };
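/*
 * Applications do not call these handlers directly: the generic rte_flow API
 * (rte_flow_validate(), rte_flow_create(), rte_flow_destroy(), ...) reaches
 * them through cxgbe_dev_filter_ctrl() below, which exports this ops table
 * for the RTE_ETH_FILTER_GENERIC filter type.
 */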
1348
1349 int
1350 cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
1351                       enum rte_filter_type filter_type,
1352                       enum rte_filter_op filter_op,
1353                       void *arg)
1354 {
1355         int ret = 0;
1356
1357         RTE_SET_USED(dev);
1358         switch (filter_type) {
1359         case RTE_ETH_FILTER_GENERIC:
1360                 if (filter_op != RTE_ETH_FILTER_GET)
1361                         return -EINVAL;
1362                 *(const void **)arg = &cxgbe_flow_ops;
1363                 break;
1364         default:
1365                 ret = -ENOTSUP;
1366                 break;
1367         }
1368         return ret;
1369 }