net/cxgbe: validate flows offloaded to HASH region
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "common.h"
#include "cxgbe_flow.h"

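/*
 * Helper macros to fill the val/mask pair of a field in the
 * ch_filter_specification being built up. __CXGBE_FILL_FS() refuses to set
 * the same field twice across pattern items and returns an rte_flow error
 * in that case; the _MEMCPY variant is used for multi-byte fields such as
 * IP addresses and performs no duplicate check.
 */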
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
        if (!((fs)->val.elem || (fs)->mask.elem)) { \
                (fs)->val.elem = (__v); \
                (fs)->mask.elem = (__m); \
        } else { \
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
                                          NULL, "a filter can be specified" \
                                          " only once"); \
        } \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
        memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
        memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
        __CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
        __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
        /* rte_flow specification does not allow it. */
        if (!i->spec && (i->mask || i->last))
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last or mask given without spec");
        /*
         * We don't support it.
         * Although we could accept last values of 0 or last == spec,
         * doing so would not give the user any additional functionality
         * and would only increase complexity for us.
         */
        if (i->last)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last is not supported by chelsio pmd");
        return 0;
}

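/*
 * Decide whether a flow can be offloaded to the HASH (exact-match) filter
 * region instead of the TCAM. A flow qualifies only if:
 *   - the adapter supports hash filters,
 *   - both IP addresses are non-zero and fully masked (exact match),
 *   - both L4 ports are non-zero and fully masked, and
 *   - the resulting proto/ethertype tuple mask matches the hash filter
 *     mask configured in hardware (tp->hash_filter_mask).
 * When all of the above hold, fs->cap is set to 1 and the filter is placed
 * in the hash region; otherwise it falls back to the TCAM.
 *
 * Illustrative example (testpmd-style rule; exact syntax may vary): a rule
 * that fully specifies the 4-tuple, e.g.
 *   flow create 0 ingress pattern ipv4 src is 1.1.1.1 dst is 2.2.2.2 /
 *        tcp src is 100 dst is 200 / end actions queue index 0 / end
 * can be offloaded to the hash region, while a rule with a partially
 * masked address or port cannot.
 */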
static void
cxgbe_fill_filter_region(struct adapter *adap,
                         struct ch_filter_specification *fs)
{
        struct tp_params *tp = &adap->params.tp;
        u64 hash_filter_mask = tp->hash_filter_mask;
        u64 ntuple_mask = 0;

        fs->cap = 0;

        if (!is_hashfilter(adap))
                return;

        if (fs->type) {
                uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff};
                uint8_t bitoff[16] = {0};

                if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, biton, sizeof(biton)))
                        return;
        } else {
                uint32_t biton  = 0xffffffff;
                uint32_t bitoff = 0x0U;

                if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, &biton, sizeof(biton)))
                        return;
        }

        if (!fs->val.lport || fs->mask.lport != 0xffff)
                return;
        if (!fs->val.fport || fs->mask.fport != 0xffff)
                return;

        if (tp->protocol_shift >= 0)
                ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
        if (tp->ethertype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

        if (ntuple_mask != hash_filter_mask)
                return;

        fs->cap = 1;    /* use hash region */
}

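/*
 * The item parsers below translate rte_flow pattern items into the
 * ch_filter_specification. When an item carries no explicit mask, the
 * default mask from the generic flow API (e.g. rte_flow_item_udp_mask)
 * is used. Note the direction mapping: the destination address/port of an
 * ingress packet goes into the "local" fields (lip/lport) and the source
 * into the "foreign" fields (fip/fport).
 */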
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_udp *val = item->spec;
        const struct rte_flow_item_udp *umask = item->mask;
        const struct rte_flow_item_udp *mask;

        mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

        if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "udp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_tcp *val = item->spec;
        const struct rte_flow_item_tcp *umask = item->mask;
        const struct rte_flow_item_tcp *mask;

        mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

        if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
            mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
            mask->hdr.tcp_urp)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tcp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv4 *val = item->spec;
        const struct rte_flow_item_ipv4 *umask = item->mask;
        const struct rte_flow_item_ipv4 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

        if (mask->hdr.time_to_live || mask->hdr.type_of_service)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "ttl/tos are not supported");

        fs->type = FILTER_TYPE_IPV4;
        CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv4 wild card */

        CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv6 *val = item->spec;
        const struct rte_flow_item_ipv6 *umask = item->mask;
        const struct rte_flow_item_ipv6 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

        if (mask->hdr.vtc_flow ||
            mask->hdr.payload_len || mask->hdr.hop_limits)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tc/flow/hop are not supported");

        fs->type = FILTER_TYPE_IPV6;
        CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv6 wild card */

        CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
                      struct rte_flow_error *e)
{
        if (attr->egress)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "attribute:<egress> is"
                                          " not supported !");
        if (attr->group > 0)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "group parameter is"
                                          " not supported.");

        flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

        return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
        struct port_info *pi = ethdev2pinfo(dev);

        if (rxq > pi->n_rx_qsets)
                return -EINVAL;
        return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct ch_filter_specification fs = f->fs;

        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "invalid flow index %d.\n", fidx);
                return -EINVAL;
        }
        if (!is_filter_set(&adap->tids, fidx, fs.type)) {
                dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
                return -EINVAL;
        }

        return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
                         struct adapter *adap, unsigned int fidx)
{
        if (is_filter_set(&adap->tids, fidx, fs->type)) {
                dev_err(adap, "filter index: %d is busy.\n", fidx);
                return -EBUSY;
        }
        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "filter index (%u) >= max(%u)\n",
                        fidx, adap->tids.nftids);
                return -ERANGE;
        }

        return 0;
}

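/*
 * Filter index checks apply only to TCAM filters. Flows destined for the
 * hash region (fs->cap set) do not consume a TCAM slot, so no index
 * validation is performed for them.
 */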
static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
        if (flow->fs.cap)
                return 0; /* Hash filters */
        return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
                cxgbe_validate_fidxonadd(&flow->fs,
                                         ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(flow->dev);

        /* For TCAM, get the next available slot if no index was specified */
        if (flow->fidx == FILTER_ID_MAX) {
                int idx;

                idx = cxgbe_alloc_ftid(adap, fs->type);
                if (idx < 0) {
                        dev_err(adap, "unable to get a filter index in tcam\n");
                        return -ENOMEM;
                }
                *fidx = (unsigned int)idx;
        } else {
                *fidx = flow->fidx;
        }

        return 0;
}

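/*
 * Translate the rte_flow action list. Only DROP, QUEUE and COUNT are
 * supported, and exactly one of DROP/QUEUE may appear in a rule.
 */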
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
                         const struct rte_flow_action action[],
                         struct rte_flow_error *e)
{
        struct ch_filter_specification *fs = &flow->fs;
        const struct rte_flow_action_queue *q;
        const struct rte_flow_action *a;
        char abit = 0;

        for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
                switch (a->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        q = (const struct rte_flow_action_queue *)a->conf;
                        if (!q)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "specify rx queue index");
                        if (check_rxq(flow->dev, q->index))
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "Invalid rx queue");
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_PASS;
                        fs->dirsteer = 1;
                        fs->iq = q->index;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        fs->hitcnts = 1;
                        break;
                default:
                        /* Not supported action: return error */
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  a, "Action not supported");
                }
        }

        return 0;
}

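/*
 * Pattern item dispatch table, indexed by RTE_FLOW_ITEM_TYPE_*. Each entry
 * provides the parser callback and the default mask applied when the user
 * does not supply one.
 */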
struct chrte_fparse parseitem[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .fptr  = ch_rte_parsetype_ipv4,
                .dmask = &rte_flow_item_ipv4_mask,
        },

        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .fptr  = ch_rte_parsetype_ipv6,
                .dmask = &rte_flow_item_ipv6_mask,
        },

        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .fptr  = ch_rte_parsetype_udp,
                .dmask = &rte_flow_item_udp_mask,
        },

        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .fptr  = ch_rte_parsetype_tcp,
                .dmask = &rte_flow_item_tcp_mask,
        },
};

static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
                       const struct rte_flow_item items[],
                       struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        const struct rte_flow_item *i;
        char repeat[ARRAY_SIZE(parseitem)] = {0};

        for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
                struct chrte_fparse *idx;
                int ret;

                if (i->type >= ARRAY_SIZE(parseitem))
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  i, "Item not supported");

                switch (i->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        continue;
                default:
                        /* check if item is repeated */
                        if (repeat[i->type])
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "parse items cannot be repeated (except void)");
                        repeat[i->type] = 1;

                        /* validate the item */
                        ret = cxgbe_validate_item(i, e);
                        if (ret)
                                return ret;

                        idx = &flow->item_parser[i->type];
                        if (!idx->fptr)
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "Item not supported");

                        ret = idx->fptr(idx->dmask, i, &flow->fs, e);
                        if (ret)
                                return ret;
                }
        }

        cxgbe_fill_filter_region(adap, &flow->fs);

        return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item item[],
                 const struct rte_flow_action action[],
                 struct rte_flow_error *e)
{
        int ret;

        /* parse user request into ch_filter_specification */
        ret = cxgbe_rtef_parse_attr(flow, attr, e);
        if (ret)
                return ret;
        ret = cxgbe_rtef_parse_items(flow, item, e);
        if (ret)
                return ret;
        return cxgbe_rtef_parse_actions(flow, action, e);
}

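/*
 * Program the parsed filter into hardware: pick a filter index (for TCAM
 * filters), issue the set-filter request and wait for the firmware reply
 * on the event queue before reporting success.
 */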
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(dev);
        struct filter_ctx ctx;
        unsigned int fidx;
        int err;

        if (cxgbe_get_fidx(flow, &fidx))
                return -ENOMEM;
        if (cxgbe_verify_fidx(flow, fidx, 0))
                return -1;

        t4_init_completion(&ctx.completion);
        /* go create the filter */
        err = cxgbe_set_filter(dev, fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while creating filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter set operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while creating the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        flow->fidx = fidx;
        flow->f = &adap->tids.ftid_tab[fidx];

        return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item item[],
                  const struct rte_flow_action action[],
                  struct rte_flow_error *e)
{
        struct rte_flow *flow;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow) {
                rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to allocate memory for"
                                   " filter_entry");
                return NULL;
        }

        flow->item_parser = parseitem;
        flow->dev = dev;

        if (cxgbe_flow_parse(flow, attr, item, action, e)) {
                t4_os_free(flow);
                return NULL;
        }

        /* go, interact with cxgbe_filter */
        ret = __cxgbe_flow_create(dev, flow);
        if (ret) {
                rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to create flow rule");
                t4_os_free(flow);
                return NULL;
        }

        flow->f->private = flow; /* Will be used during flush */

        return flow;
}

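/*
 * Remove a previously created filter from hardware: verify the filter
 * index still belongs to this flow, issue the delete request and wait
 * for the firmware completion.
 */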
static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct adapter *adap = ethdev2adap(dev);
        struct filter_entry *f = flow->f;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int err;

        fs = &f->fs;
        if (cxgbe_verify_fidx(flow, flow->fidx, 1))
                return -1;

        t4_init_completion(&ctx.completion);
        err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while deleting filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter delete operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while deleting the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_error *e)
{
        int ret;

        ret = __cxgbe_flow_destroy(dev, flow);
        if (ret)
                return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "error destroying filter.");
        t4_os_free(flow);
        return 0;
}

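/* Fetch the packet and byte hit counters for a filter from hardware. */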
static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
                              u64 *byte_count)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        unsigned int fidx = flow->fidx;
        int ret = 0;

        ret = cxgbe_get_filter_count(adap, fidx, count, 0);
        if (ret)
                return ret;
        return cxgbe_get_filter_count(adap, fidx, byte_count, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                 const struct rte_flow_action *action, void *data,
                 struct rte_flow_error *e)
{
        struct ch_filter_specification fs;
        struct rte_flow_query_count *c;
        struct filter_entry *f;
        int ret;

        RTE_SET_USED(dev);

        f = flow->f;
        fs = f->fs;

        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                return rte_flow_error_set(e, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "only count supported for query");

        /*
         * This is a valid operation, since we are allowed to do Chelsio
         * specific operations in the rte side of our code, but not vice versa.
         *
         * So, fs can be queried/modified here BUT rte_flow_query_count
         * cannot be worked on by the lower layer, since we want to keep
         * it rte_flow agnostic.
         */
        if (!fs.hitcnts)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                          &fs, "filter hit counters were not"
                                          " enabled during filter creation");

        c = (struct rte_flow_query_count *)data;
        ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
        if (ret)
                return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
                                          f, "cxgbe pmd failed to"
                                          " perform query");

        /* Query was successful */
        c->bytes_set = 1;
        c->hits_set = 1;

        return 0; /* success / partial_success */
}

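/*
 * Validate a rule without programming hardware: run the full parse,
 * check the resulting filter spec against the firmware configuration
 * and make sure a filter index would be available.
 */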
static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item item[],
                    const struct rte_flow_action action[],
                    struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        struct rte_flow *flow;
        unsigned int fidx;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow)
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "Unable to allocate memory for filter_entry");

        flow->item_parser = parseitem;
        flow->dev = dev;

        ret = cxgbe_flow_parse(flow, attr, item, action, e);
        if (ret) {
                t4_os_free(flow);
                return ret;
        }

        if (validate_filter(adap, &flow->fs)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "validation failed. Check f/w config file.");
        }

        if (cxgbe_get_fidx(flow, &fidx)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "no memory in tcam.");
        }

        if (cxgbe_verify_fidx(flow, fidx, 0)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "validation failed");
        }

        t4_os_free(flow);
        return 0;
}

/*
 * @ret : == 0 filter destroyed successfully
 *        == 1 filter not active / not found
 *         < 0 error destroying filter
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
                      struct rte_flow_error *e)
{
        if (f && (f->valid || f->pending) &&
            f->dev == dev && /* Only if user has asked for this port */
             f->private) /* We (rte_flow) created this filter */
                return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
                                          e);
        return 1;
}

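/*
 * Destroy every filter in the TCAM filter table that was created through
 * rte_flow on this port. The walk stops on the first hard error; filters
 * that are simply not active are skipped.
 */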
static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        unsigned int i;
        int ret = 0;

        if (adap->tids.ftid_tab) {
                struct filter_entry *f = &adap->tids.ftid_tab[0];

                for (i = 0; i < adap->tids.nftids; i++, f++) {
                        ret = cxgbe_check_n_destroy(f, dev, e);
                        if (ret < 0)
                                goto out;
                }
        }
out:
        return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
        .validate       = cxgbe_flow_validate,
        .create         = cxgbe_flow_create,
        .destroy        = cxgbe_flow_destroy,
        .flush          = cxgbe_flow_flush,
        .query          = cxgbe_flow_query,
        .isolate        = NULL,
};

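/*
 * Generic filter-control entry point: hand back the rte_flow ops table
 * when the application asks for RTE_ETH_FILTER_GENERIC.
 */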
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                      enum rte_filter_type filter_type,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        int ret = 0;

        RTE_SET_USED(dev);
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &cxgbe_flow_ops;
                break;
        default:
                ret = -ENOTSUP;
                break;
        }
        return ret;
}