net/cxgbe: support to offload flows to HASH region
drivers/net/cxgbe/cxgbe_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "common.h"
#include "cxgbe_flow.h"

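/*
 * Helpers to fill a value/mask pair of a field in the filter specification.
 * __CXGBE_FILL_FS rejects a field that was already set by an earlier item.
 */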
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if (!((fs)->val.elem || (fs)->mask.elem)) { \
		(fs)->val.elem = (__v); \
		(fs)->mask.elem = (__m); \
	} else { \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "a filter can be specified" \
					  " only once"); \
	} \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* rte_flow specification does not allow it. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   i, "last or mask given without spec");
	/*
	 * We don't support "last". We could accept last values of 0 or
	 * last == spec, but that would give the user no additional
	 * functionality and would only add complexity on our side.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				   i, "last is not supported by chelsio pmd");
	return 0;
}

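/*
 * Decide whether this filter can be placed in the HASH (exact-match) region
 * rather than the LE-TCAM. Hash filters need fully specified local/foreign
 * IP addresses and ports, and the resulting tuple mask must match the hash
 * filter mask configured in hardware; otherwise the filter stays in the
 * TCAM region (fs->cap = 0).
 */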
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton  = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}

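/*
 * Pattern item parsers. Each one receives the item's default mask (dmask)
 * to fall back on when the application supplies no mask, and translates the
 * item into the Chelsio filter specification.
 */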
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);
	return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);
	return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl/tos are not supported");

	fs->type = FILTER_TYPE_IPV4;
	CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv4 wild card */

	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	if (mask->hdr.vtc_flow ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tc/flow/hop are not supported");

	fs->type = FILTER_TYPE_IPV6;
	CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv6 wild card */

	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported !");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported.");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	/* Rx queue indices are 0-based */
	if (rxq >= pi->n_rx_qsets)
		return -EINVAL;
	return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %u.\n", fidx);
		return -EINVAL;
	}
	if (!is_filter_set(&adap->tids, fidx, fs.type)) {
		dev_err(adap, "Already free fidx:%u f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	if (is_filter_set(&adap->tids, fidx, fs->type)) {
		dev_err(adap, "filter index: %u is busy.\n", fidx);
		return -EBUSY;
	}
	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */
	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		cxgbe_validate_fidxonadd(&flow->fs,
					 ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For TCAM filters, grab the next available slot when the default
	 * filter index was requested (no priority given).
	 */
	if (flow->fidx == FILTER_ID_MAX) {
		int idx;

		idx = cxgbe_alloc_ftid(adap, fs->type);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}

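/*
 * Translate the action list into the filter specification: exactly one fate
 * action (DROP or QUEUE) is accepted, and COUNT enables hardware hit
 * counters.
 */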
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	char abit = 0;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			q = (const struct rte_flow_action_queue *)a->conf;
			if (!q)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"specify rx queue index");
			if (check_rxq(flow->dev, q->index))
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"Invalid rx queue");
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = q->index;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		default:
			/* Unsupported action: return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}

	return 0;
}

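/* Map of supported rte_flow item types to their parser and default mask. */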
struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr  = ch_rte_parsetype_ipv4,
		.dmask = &rte_flow_item_ipv4_mask,
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr  = ch_rte_parsetype_ipv6,
		.dmask = &rte_flow_item_ipv6_mask,
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr  = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr  = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};

static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx;
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type])
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated (except void)");
			repeat[i->type] = 1;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			idx = &flow->item_parser[i->type];
			if (!idx->fptr)
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");

			ret = idx->fptr(idx->dmask, i, &flow->fs, e);
			if (ret)
				return ret;
		}
	}

	cxgbe_fill_filter_region(adap, &flow->fs);

	return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;

	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;
	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;
	return cxgbe_rtef_parse_actions(flow, action, e);
}

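/*
 * Program the parsed filter into hardware: pick a filter index, issue the
 * set-filter request, and poll the firmware event queue for its completion.
 * Hash-region filters are identified by the tid returned in the completion
 * context; TCAM filters by their slot in ftid_tab.
 */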
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_US,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	/* Save the filter reference so the flow can be queried/destroyed */
	if (fs->cap) { /* hash filter: identified by its tid */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}

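/*
 * Remove the filter from hardware and wait for the firmware completion,
 * mirroring the create path.
 */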
static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_US,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	int ret;

	ret = __cxgbe_flow_destroy(dev, flow);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");
	t4_os_free(flow);
	return 0;
}

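/* Read the packet and byte hit counters maintained for this filter. */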
static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	unsigned int fidx = flow->fidx;
	int ret = 0;

	ret = cxgbe_get_filter_count(adap, fidx, count, 0);
	if (ret)
		return ret;
	return cxgbe_get_filter_count(adap, fidx, byte_count, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/*
	 * This is a valid operation: Chelsio-specific structures may be used
	 * on the rte_flow side of our code, but not vice versa.
	 *
	 * So fs can be queried/modified here, but rte_flow_query_count must
	 * not be touched by the lower layer, which is kept rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret)
		return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
					  f, "cxgbe pmd failed to"
					  " perform query");

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;

	return 0; /* success / partial_success */
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	if (cxgbe_get_fidx(flow, &fidx)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "no memory in tcam.");
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "validation failed");
	}

	t4_os_free(flow);
	return 0;
}

/*
 * @ret : == 0 filter destroyed successfully
 *         < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
		      struct rte_flow_error *e)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	     f->private) /* We (rte_flow) created this filter */
		return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
					  e);
	return 1;
}

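/*
 * Walk the TCAM filter table and destroy every filter on this port that was
 * created through rte_flow, stopping on the first hard failure.
 */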
static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev, e);
			if (ret < 0)
				goto out;
		}
	}
out:
	return ret >= 0 ? 0 : ret;
}

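/*
 * Illustrative usage (not part of the driver): applications reach the ops
 * below through the generic rte_flow API. For example, a rule steering TCP
 * traffic between two exact IPv4 addresses to an Rx queue -- which the logic
 * above may place in the HASH region when the tuple mask matches the
 * hardware hash filter mask -- would look roughly like:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * ip_spec/ip_mask, tcp_spec/tcp_mask, queue and port_id are assumed to be
 * filled in by the application.
 */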
static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate	= cxgbe_flow_validate,
	.create		= cxgbe_flow_create,
	.destroy	= cxgbe_flow_destroy,
	.flush		= cxgbe_flow_flush,
	.query		= cxgbe_flow_query,
	.isolate	= NULL,
};

int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}
	return ret;
}