/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "common.h"
#include "cxgbe_flow.h"

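/*
 * Fill a (value, mask) pair into the filter specification exactly once;
 * a second attempt to set the same element means the rule specified a
 * field twice, which is rejected with EINVAL. The MEMCPY variant is used
 * for multi-byte fields (IP addresses) and performs no duplicate check.
 */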
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
        if (!((fs)->val.elem || (fs)->mask.elem)) { \
                (fs)->val.elem = (__v); \
                (fs)->mask.elem = (__m); \
        } else { \
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
                                          NULL, "a filter can be specified" \
                                          " only once"); \
        } \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
        memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
        memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
        __CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
        __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
        /* rte_flow specification does not allow it. */
        if (!i->spec && (i->mask || i->last))
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last or mask given without spec");
        /*
         * Range matching via "last" is not supported. We could accept
         * last == spec or an all-zeroes last, but that would add complexity
         * without giving the user any additional functionality.
         */
        if (i->last)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last is not supported by chelsio pmd");
        return 0;
}

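/*
 * Decide whether this rule can go in the HASH (exact-match) filter region.
 * Hash filters require fully-specified 4-tuples: non-zero local and foreign
 * IPs and ports, each with an all-ones mask. In addition, the ntuple mask
 * built from the remaining fields (protocol, ethertype, ingress port) must
 * equal the mask the hardware was configured with. If all of that holds,
 * fs->cap is set to route the filter to the hash region; otherwise the
 * filter stays in the maskful LE-TCAM region.
 */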
static void
cxgbe_fill_filter_region(struct adapter *adap,
                         struct ch_filter_specification *fs)
{
        struct tp_params *tp = &adap->params.tp;
        u64 hash_filter_mask = tp->hash_filter_mask;
        u64 ntuple_mask = 0;

        fs->cap = 0;

        if (!is_hashfilter(adap))
                return;

        if (fs->type) {
                uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff};
                uint8_t bitoff[16] = {0};

                if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, biton, sizeof(biton)))
                        return;
        } else {
                uint32_t biton  = 0xffffffff;
                uint32_t bitoff = 0x0U;

                if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, &biton, sizeof(biton)))
                        return;
        }

        if (!fs->val.lport || fs->mask.lport != 0xffff)
                return;
        if (!fs->val.fport || fs->mask.fport != 0xffff)
                return;

        if (tp->protocol_shift >= 0)
                ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
        if (tp->ethertype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
        if (tp->port_shift >= 0)
                ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;

        if (ntuple_mask != hash_filter_mask)
                return;

        fs->cap = 1;    /* use hash region */
}

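/*
 * Item parsers. Each parser receives the default mask (dmask) to fall back
 * on when the user supplies no mask, rejects header fields the hardware
 * cannot match on, and copies the (spec, mask) pair into the Chelsio
 * filter specification. A NULL spec is treated as a wildcard match.
 */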
static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_phy_port *val = item->spec;
        const struct rte_flow_item_phy_port *umask = item->mask;
        const struct rte_flow_item_phy_port *mask;

        mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

        if (!val)
                return 0; /* Wildcard, match all physical ports */

        if (val->index > 0x7)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "port index up to 0x7 is supported");

        CXGBE_FILL_FS(val->index, mask->index, iport);

        return 0;
}

static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_udp *val = item->spec;
        const struct rte_flow_item_udp *umask = item->mask;
        const struct rte_flow_item_udp *mask;

        mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

        if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "udp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_tcp *val = item->spec;
        const struct rte_flow_item_tcp *umask = item->mask;
        const struct rte_flow_item_tcp *mask;

        mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

        if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
            mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
            mask->hdr.tcp_urp)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tcp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv4 *val = item->spec;
        const struct rte_flow_item_ipv4 *umask = item->mask;
        const struct rte_flow_item_ipv4 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

        if (mask->hdr.time_to_live || mask->hdr.type_of_service)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "ttl/tos are not supported");

        fs->type = FILTER_TYPE_IPV4;
        CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv4 wild card */

        CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv6 *val = item->spec;
        const struct rte_flow_item_ipv6 *umask = item->mask;
        const struct rte_flow_item_ipv6 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

        if (mask->hdr.vtc_flow ||
            mask->hdr.payload_len || mask->hdr.hop_limits)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tc/flow/hop are not supported");

        fs->type = FILTER_TYPE_IPV6;
        CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv6 wild card */

        CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

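/*
 * Only ingress rules are supported, and groups are not. The rte_flow
 * priority, when given, maps directly to a filter index (priority 1 is
 * the highest-priority slot, index 0); otherwise FILTER_ID_MAX requests
 * automatic index allocation later.
 */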
static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
                      struct rte_flow_error *e)
{
        if (attr->egress)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "attribute: <egress> is"
                                          " not supported");
        if (attr->group > 0)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "group parameter is"
                                          " not supported");

        flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

        return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
        struct port_info *pi = ethdev2pinfo(dev);

        /* Rx queue indices are 0-based, so n_rx_qsets itself is out of range */
        if (rxq >= pi->n_rx_qsets)
                return -EINVAL;
        return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct ch_filter_specification fs = f->fs;

        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "invalid flow index %d.\n", fidx);
                return -EINVAL;
        }
        if (!is_filter_set(&adap->tids, fidx, fs.type)) {
                dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
                return -EINVAL;
        }

        return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
                         struct adapter *adap, unsigned int fidx)
{
        if (is_filter_set(&adap->tids, fidx, fs->type)) {
                dev_err(adap, "filter index: %d is busy.\n", fidx);
                return -EBUSY;
        }
        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "filter index (%u) >= max(%u)\n",
                        fidx, adap->tids.nftids);
                return -ERANGE;
        }

        return 0;
}

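/*
 * Hash filters get their index from hardware, so there is nothing to
 * verify up front. For TCAM filters, check that the requested index is in
 * range and is free (on add) or actually in use (on delete).
 */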
static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
        if (flow->fs.cap)
                return 0; /* Hash filters */
        return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
                cxgbe_validate_fidxonadd(&flow->fs,
                                         ethdev2adap(flow->dev), fidx);
}

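/*
 * Resolve the TCAM slot for this flow: either the index derived from the
 * rule's priority, or the next free slot when none was requested.
 */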
static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(flow->dev);

        /* For TCAM, get the next available slot if no index was requested */
        if (flow->fidx == FILTER_ID_MAX) {
                int idx;

                idx = cxgbe_alloc_ftid(adap, fs->type);
                if (idx < 0) {
                        dev_err(adap, "unable to get a filter index in tcam\n");
                        return -ENOMEM;
                }
                *fidx = (unsigned int)idx;
        } else {
                *fidx = flow->fidx;
        }

        return 0;
}

static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
                          struct ch_filter_specification *fs,
                          struct rte_flow_error *e)
{
        const struct rte_flow_action_phy_port *port;

        switch (a->type) {
        case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                port = (const struct rte_flow_action_phy_port *)a->conf;
                fs->eport = port->index;
                break;
        default:
                /* We are not supposed to come here */
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "Action not supported");
        }

        return 0;
}

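/*
 * Walk the action list and translate it into a filter action. Exactly one
 * terminating action (drop or queue) is accepted; count enables hit
 * counters; phy_port turns the rule into a switch action, which may be
 * combined with other switch actions but not with pass/drop.
 */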
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
                         const struct rte_flow_action action[],
                         struct rte_flow_error *e)
{
        struct ch_filter_specification *fs = &flow->fs;
        const struct rte_flow_action_queue *q;
        const struct rte_flow_action *a;
        char abit = 0;
        int ret;

        for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
                switch (a->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        q = (const struct rte_flow_action_queue *)a->conf;
                        if (!q)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "specify rx queue index");
                        if (check_rxq(flow->dev, q->index))
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "Invalid rx queue");
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_PASS;
                        fs->dirsteer = 1;
                        fs->iq = q->index;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        fs->hitcnts = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                        /* We allow multiple switch actions, but switch is
                         * not compatible with either queue or drop
                         */
                        if (abit++ && fs->action != FILTER_SWITCH)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "overlapping action specified");
                        ret = ch_rte_parse_atype_switch(a, fs, e);
                        if (ret)
                                return ret;
                        fs->action = FILTER_SWITCH;
                        break;
                default:
                        /* Unsupported action: return error */
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  a, "Action not supported");
                }
        }

        return 0;
}

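/*
 * Dispatch table for pattern items, indexed by rte_flow item type. Each
 * entry supplies the parser callback and the default mask applied when
 * the rule leaves the item's mask unset.
 */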
struct chrte_fparse parseitem[] = {
        [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
                .fptr = ch_rte_parsetype_port,
                .dmask = &(const struct rte_flow_item_phy_port){
                        .index = 0x7,
                }
        },

        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .fptr  = ch_rte_parsetype_ipv4,
                .dmask = &rte_flow_item_ipv4_mask,
        },

        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .fptr  = ch_rte_parsetype_ipv6,
                .dmask = &rte_flow_item_ipv6_mask,
        },

        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .fptr  = ch_rte_parsetype_udp,
                .dmask = &rte_flow_item_udp_mask,
        },

        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .fptr  = ch_rte_parsetype_tcp,
                .dmask = &rte_flow_item_tcp_mask,
        },
};

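/*
 * Walk the pattern and hand every item to its parser. Apart from VOID,
 * an item type may appear at most once in a rule. Once all items are
 * parsed, decide which filter region (hash or TCAM) the rule lands in.
 */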
static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
                       const struct rte_flow_item items[],
                       struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        const struct rte_flow_item *i;
        char repeat[ARRAY_SIZE(parseitem)] = {0};

        for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
                struct chrte_fparse *idx;
                int ret;

                if (i->type >= ARRAY_SIZE(parseitem))
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  i, "Item not supported");

                switch (i->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        continue;
                default:
                        /* check if item is repeated */
                        if (repeat[i->type])
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "parse items cannot be repeated (except void)");
                        repeat[i->type] = 1;

                        /* validate the item */
                        ret = cxgbe_validate_item(i, e);
                        if (ret)
                                return ret;

                        idx = &flow->item_parser[i->type];
                        if (!idx || !idx->fptr) {
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "Item not supported");
                        } else {
                                ret = idx->fptr(idx->dmask, i, &flow->fs, e);
                                if (ret)
                                        return ret;
                        }
                }
        }

        cxgbe_fill_filter_region(adap, &flow->fs);

        return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item item[],
                 const struct rte_flow_action action[],
                 struct rte_flow_error *e)
{
        int ret;

        /* parse user request into ch_filter_specification */
        ret = cxgbe_rtef_parse_attr(flow, attr, e);
        if (ret)
                return ret;
        ret = cxgbe_rtef_parse_items(flow, item, e);
        if (ret)
                return ret;
        return cxgbe_rtef_parse_actions(flow, action, e);
}

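/*
 * Filter work requests complete asynchronously, so after posting the
 * request we poll the firmware event queue until the completion for this
 * filter arrives (or the poll budget expires). On success, remember the
 * filter entry and its index so the flow can later be queried/destroyed.
 */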
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(dev);
        struct tid_info *t = &adap->tids;
        struct filter_ctx ctx;
        unsigned int fidx;
        int err;

        if (cxgbe_get_fidx(flow, &fidx))
                return -ENOMEM;
        if (cxgbe_verify_fidx(flow, fidx, 0))
                return -1;

        t4_init_completion(&ctx.completion);
        /* go create the filter */
        err = cxgbe_set_filter(dev, fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while creating filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter set operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while creating the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        /* Save the filter entry and index; needed to destroy the filter */
        if (fs->cap) { /* hash filter: hardware assigned the tid */
                flow->fidx = ctx.tid;
                flow->f = lookup_tid(t, ctx.tid);
        } else {
                flow->fidx = fidx;
                flow->f = &adap->tids.ftid_tab[fidx];
        }

        return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item item[],
                  const struct rte_flow_action action[],
                  struct rte_flow_error *e)
{
        struct rte_flow *flow;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow) {
                rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to allocate memory for"
                                   " filter_entry");
                return NULL;
        }

        flow->item_parser = parseitem;
        flow->dev = dev;

        if (cxgbe_flow_parse(flow, attr, item, action, e)) {
                t4_os_free(flow);
                return NULL;
        }

        /* go, interact with cxgbe_filter */
        ret = __cxgbe_flow_create(dev, flow);
        if (ret) {
                rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to create flow rule");
                t4_os_free(flow);
                return NULL;
        }

        flow->f->private = flow; /* Will be used during flush */

        return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct adapter *adap = ethdev2adap(dev);
        struct filter_entry *f = flow->f;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int err;

        fs = &f->fs;
        if (cxgbe_verify_fidx(flow, flow->fidx, 1))
                return -1;

        t4_init_completion(&ctx.completion);
        err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while deleting filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter delete operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while deleting the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_error *e)
{
        int ret;

        ret = __cxgbe_flow_destroy(dev, flow);
        if (ret)
                return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "error destroying filter.");
        t4_os_free(flow);
        return 0;
}

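/*
 * Fetch the packet and byte hit counters maintained by hardware for this
 * filter; the last argument to cxgbe_get_filter_count() selects between
 * the packet count (0) and the byte count (1).
 */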
static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
                              u64 *byte_count)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        struct ch_filter_specification fs = flow->f->fs;
        unsigned int fidx = flow->fidx;
        int ret = 0;

        ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
        if (ret)
                return ret;
        return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                 const struct rte_flow_action *action, void *data,
                 struct rte_flow_error *e)
{
        struct ch_filter_specification fs;
        struct rte_flow_query_count *c;
        struct filter_entry *f;
        int ret;

        RTE_SET_USED(dev);

        f = flow->f;
        fs = f->fs;

        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                return rte_flow_error_set(e, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "only count supported for query");

        /*
         * This is a valid operation: the rte side of our code is allowed
         * to perform Chelsio-specific operations, but not vice versa.
         *
         * So, fs can be queried/modified here, BUT rte_flow_query_count
         * cannot be touched by the lower layer, since we want to keep it
         * rte_flow agnostic.
         */
        if (!fs.hitcnts)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                          &fs, "filter hit counters were not"
                                          " enabled during filter creation");

        c = (struct rte_flow_query_count *)data;
        ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
        if (ret)
                return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
                                          f, "cxgbe pmd failed to"
                                          " perform query");

        /* Query was successful */
        c->bytes_set = 1;
        c->hits_set = 1;

        return 0; /* success / partial_success */
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item item[],
                    const struct rte_flow_action action[],
                    struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        struct rte_flow *flow;
        unsigned int fidx;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow)
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "Unable to allocate memory for filter_entry");

        flow->item_parser = parseitem;
        flow->dev = dev;

        ret = cxgbe_flow_parse(flow, attr, item, action, e);
        if (ret) {
                t4_os_free(flow);
                return ret;
        }

        if (validate_filter(adap, &flow->fs)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "validation failed. Check f/w config file.");
        }

        if (cxgbe_get_fidx(flow, &fidx)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "no memory in tcam.");
        }

        if (cxgbe_verify_fidx(flow, fidx, 0)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "validation failed");
        }

        t4_os_free(flow);
        return 0;
}

/*
 * @ret : == 0 filter destroyed successfully
 *        < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
                      struct rte_flow_error *e)
{
        if (f && (f->valid || f->pending) &&
            f->dev == dev && /* Only if user has asked for this port */
             f->private) /* We (rte_flow) created this filter */
                return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
                                          e);
        return 1;
}

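/*
 * Destroy every rte_flow-created filter owned by this port: first the
 * maskful TCAM entries in ftid_tab, then, on hash-filter capable
 * adapters, the exact-match entries tracked in tid_tab.
 */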
static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        unsigned int i;
        int ret = 0;

        if (adap->tids.ftid_tab) {
                struct filter_entry *f = &adap->tids.ftid_tab[0];

                for (i = 0; i < adap->tids.nftids; i++, f++) {
                        ret = cxgbe_check_n_destroy(f, dev, e);
                        if (ret < 0)
                                goto out;
                }
        }

        if (is_hashfilter(adap) && adap->tids.tid_tab) {
                struct filter_entry *f;

                /* tid_tab holds ntids entries; keep the index in bounds */
                for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
                        f = (struct filter_entry *)adap->tids.tid_tab[i];

                        ret = cxgbe_check_n_destroy(f, dev, e);
                        if (ret < 0)
                                goto out;
                }
        }

out:
        return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
        .validate       = cxgbe_flow_validate,
        .create         = cxgbe_flow_create,
        .destroy        = cxgbe_flow_destroy,
        .flush          = cxgbe_flow_flush,
        .query          = cxgbe_flow_query,
        .isolate        = NULL,
};

int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                      enum rte_filter_type filter_type,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        int ret = 0;

        RTE_SET_USED(dev);
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &cxgbe_flow_ops;
                break;
        default:
                ret = -ENOTSUP;
                break;
        }
        return ret;
}