net/cxgbe: add flow actions to modify IP and TCP/UDP port
[dpdk.git] / drivers / net / cxgbe / cxgbe_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "common.h"
#include "cxgbe_flow.h"

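/*
 * Fill a match field in the filter specification: record the
 * value/mask pair once, and fail the rule if a later pattern item
 * tries to set the same field again.
 */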
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
        if (!((fs)->val.elem || (fs)->mask.elem)) { \
                (fs)->val.elem = (__v); \
                (fs)->mask.elem = (__m); \
        } else { \
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
                                          NULL, "a filter can be specified" \
                                          " only once"); \
        } \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
        memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
        memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
        __CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
        __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
        /* rte_flow specification does not allow it. */
        if (!i->spec && (i->mask || i->last))
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last or mask given without spec");
        /*
         * We don't support it.
         * Although we could accept last as all-zeros or last == spec,
         * that would give the user no additional functionality while
         * adding complexity on our side.
         */
        if (i->last)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                   i, "last is not supported by chelsio pmd");
        return 0;
}

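/*
 * Decide whether this rule qualifies for the hash (exact-match) filter
 * region: both IP addresses and both ports must be exact-match, and
 * the remaining ntuple fields must line up with the hash-filter mask
 * programmed in hardware.
 */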
static void
cxgbe_fill_filter_region(struct adapter *adap,
                         struct ch_filter_specification *fs)
{
        struct tp_params *tp = &adap->params.tp;
        u64 hash_filter_mask = tp->hash_filter_mask;
        u64 ntuple_mask = 0;

        fs->cap = 0;

        if (!is_hashfilter(adap))
                return;

        if (fs->type) {
                uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff};
                uint8_t bitoff[16] = {0};

                if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, biton, sizeof(biton)))
                        return;
        } else {
                uint32_t biton  = 0xffffffff;
                uint32_t bitoff = 0x0U;

                if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, &biton, sizeof(biton)))
                        return;
        }

        if (!fs->val.lport || fs->mask.lport != 0xffff)
                return;
        if (!fs->val.fport || fs->mask.fport != 0xffff)
                return;

        if (tp->protocol_shift >= 0)
                ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
        if (tp->ethertype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
        if (tp->port_shift >= 0)
                ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
        if (tp->macmatch_shift >= 0)
                ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

        if (ntuple_mask != hash_filter_mask)
                return;

        fs->cap = 1;    /* use hash region */
}

static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *umask = item->mask;
        const struct rte_flow_item_eth *mask;

        /* If user has not given any mask, then use chelsio supported mask. */
        mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

        /* we don't support SRC_MAC filtering */
        if (!is_zero_ether_addr(&mask->src))
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "src mac filtering not supported");

        if (!is_zero_ether_addr(&mask->dst)) {
                const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
                const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
                struct rte_flow *flow = (struct rte_flow *)fs->private;
                struct port_info *pi = (struct port_info *)
                                        (flow->dev->data->dev_private);
                int idx;

                idx = cxgbe_mpstcam_alloc(pi, addr, m);
                if (idx <= 0)
                        return rte_flow_error_set(e, idx,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unable to allocate mac"
                                                  " entry in h/w");
                CXGBE_FILL_FS(idx, 0x1ff, macidx);
        }

        CXGBE_FILL_FS(be16_to_cpu(spec->type),
                      be16_to_cpu(mask->type), ethtype);
        return 0;
}

static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_phy_port *val = item->spec;
        const struct rte_flow_item_phy_port *umask = item->mask;
        const struct rte_flow_item_phy_port *mask;

        mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

        if (val->index > 0x7)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "port index up to 0x7 is supported");

        CXGBE_FILL_FS(val->index, mask->index, iport);

        return 0;
}

static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_udp *val = item->spec;
        const struct rte_flow_item_udp *umask = item->mask;
        const struct rte_flow_item_udp *mask;

        mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

        if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "udp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_tcp *val = item->spec;
        const struct rte_flow_item_tcp *umask = item->mask;
        const struct rte_flow_item_tcp *mask;

        mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

        if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
            mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
            mask->hdr.tcp_urp)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tcp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv4 *val = item->spec;
        const struct rte_flow_item_ipv4 *umask = item->mask;
        const struct rte_flow_item_ipv4 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

        if (mask->hdr.time_to_live || mask->hdr.type_of_service)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "ttl/tos are not supported");

        fs->type = FILTER_TYPE_IPV4;
        CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv4 wild card */

        CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv6 *val = item->spec;
        const struct rte_flow_item_ipv6 *umask = item->mask;
        const struct rte_flow_item_ipv6 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

        if (mask->hdr.vtc_flow ||
            mask->hdr.payload_len || mask->hdr.hop_limits)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tc/flow/hop are not supported");

        fs->type = FILTER_TYPE_IPV6;
        CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv6 wild card */

        CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
                      struct rte_flow_error *e)
{
        if (attr->egress)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "attribute:<egress> is"
                                          " not supported!");
        if (attr->group > 0)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr, "group parameter is"
                                          " not supported.");

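        /* Priority 1 maps to filter index 0; the default priority of 0
         * tells the driver to pick the next free filter slot itself.
         */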
        flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

        return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
        struct port_info *pi = ethdev2pinfo(dev);

        if (rxq > pi->n_rx_qsets)
                return -EINVAL;
        return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct ch_filter_specification fs = f->fs;

        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "invalid flow index %d.\n", fidx);
                return -EINVAL;
        }
        if (!is_filter_set(&adap->tids, fidx, fs.type)) {
                dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
                return -EINVAL;
        }

        return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
                         struct adapter *adap, unsigned int fidx)
{
        if (is_filter_set(&adap->tids, fidx, fs->type)) {
                dev_err(adap, "filter index: %d is busy.\n", fidx);
                return -EBUSY;
        }
        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "filter index (%u) >= max(%u)\n",
                        fidx, adap->tids.nftids);
                return -ERANGE;
        }

        return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
        if (flow->fs.cap)
                return 0; /* Hash filters */
        return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
                cxgbe_validate_fidxonadd(&flow->fs,
                                         ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(flow->dev);

        /* For TCAM, get the next available slot if the default filter
         * index was specified.
         */
        if (flow->fidx == FILTER_ID_MAX) {
                int idx;

                idx = cxgbe_alloc_ftid(adap, fs->type);
                if (idx < 0) {
                        dev_err(adap, "unable to get a filter index in tcam\n");
                        return -ENOMEM;
                }
                *fidx = (unsigned int)idx;
        } else {
                *fidx = flow->fidx;
        }

        return 0;
}

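/* Return the index of the first pattern item of the given type, or
 * -ENOENT if the pattern does not contain one.
 */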
static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
        const struct rte_flow_item *i;
        int j, index = -ENOENT;

        for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
                if (i->type == type) {
                        index = j;
                        break;
                }
        }

        return index;
}

static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
        /* nmode:
         * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
         * BIT_2 = [src_port], BIT_3 = [dst_port]
         *
         * Only the cases below are supported, as per our spec.
         */
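        /*
         * Example: a rule rewriting the destination IP address and the
         * destination TCP/UDP port (illustrative testpmd syntax:
         * "... actions set_ipv4_dst ipv4_addr 2.2.2.2 /
         *  set_tp_dst port 8080 / phy_port index 1 / end")
         * arrives here with nmode = 1010b and maps to NAT_MODE_DIP_DP.
         */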
        switch (nmode) {
        case 0:  /* 0000b */
                fs->nat_mode = NAT_MODE_NONE;
                break;
        case 2:  /* 0010b */
                fs->nat_mode = NAT_MODE_DIP;
                break;
        case 5:  /* 0101b */
                fs->nat_mode = NAT_MODE_SIP_SP;
                break;
        case 7:  /* 0111b */
                fs->nat_mode = NAT_MODE_DIP_SIP_SP;
                break;
        case 10: /* 1010b */
                fs->nat_mode = NAT_MODE_DIP_DP;
                break;
        case 11: /* 1011b */
                fs->nat_mode = NAT_MODE_DIP_DP_SIP;
                break;
        case 14: /* 1110b */
                fs->nat_mode = NAT_MODE_DIP_DP_SP;
                break;
        case 15: /* 1111b */
                fs->nat_mode = NAT_MODE_ALL;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

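/*
 * Parse one rewrite ("switch") action, filling the filter specification
 * and accumulating the NAT mode bits consumed by ch_rte_parse_nat().
 */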
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
                          const struct rte_flow_item items[],
                          uint8_t *nmode,
                          struct ch_filter_specification *fs,
                          struct rte_flow_error *e)
{
        const struct rte_flow_action_of_set_vlan_vid *vlanid;
        const struct rte_flow_action_of_push_vlan *pushvlan;
        const struct rte_flow_action_set_ipv4 *ipv4;
        const struct rte_flow_action_set_ipv6 *ipv6;
        const struct rte_flow_action_set_tp *tp_port;
        const struct rte_flow_action_phy_port *port;
        int item_index;

        switch (a->type) {
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
                          a->conf;
                fs->newvlan = VLAN_REWRITE;
                /* vlan_vid is big endian in the rte_flow action */
                fs->vlan = be16_to_cpu(vlanid->vlan_vid);
                break;
        case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                pushvlan = (const struct rte_flow_action_of_push_vlan *)
                            a->conf;
                /* ethertype is big endian in the rte_flow action */
                if (be16_to_cpu(pushvlan->ethertype) != ETHER_TYPE_VLAN)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "only ethertype 0x8100 "
                                                  "supported for push vlan.");
                fs->newvlan = VLAN_INSERT;
                break;
        case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                fs->newvlan = VLAN_REMOVE;
                break;
        case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                port = (const struct rte_flow_action_phy_port *)a->conf;
                fs->eport = port->index;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV4);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV4 "
                                                  "found.");

                ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
                memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
                *nmode |= 1 << 0;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV4);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV4 "
                                                  "found.");

                ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
                memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
                *nmode |= 1 << 1;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV6);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV6 "
                                                  "found.");

                ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
                memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
                *nmode |= 1 << 0;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV6);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                  "No RTE_FLOW_ITEM_TYPE_IPV6 "
                                                  "found.");

                ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
                memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
                *nmode |= 1 << 1;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_TCP);
                if (item_index < 0) {
                        item_index =
                                cxgbe_get_flow_item_index(items,
                                                RTE_FLOW_ITEM_TYPE_UDP);
                        if (item_index < 0)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "No RTE_FLOW_ITEM_TYPE_TCP or "
                                                "RTE_FLOW_ITEM_TYPE_UDP found");
                }

                tp_port = (const struct rte_flow_action_set_tp *)a->conf;
                fs->nat_fport = be16_to_cpu(tp_port->port);
                *nmode |= 1 << 2;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_TCP);
                if (item_index < 0) {
                        item_index =
                                cxgbe_get_flow_item_index(items,
                                                RTE_FLOW_ITEM_TYPE_UDP);
                        if (item_index < 0)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "No RTE_FLOW_ITEM_TYPE_TCP or "
                                                "RTE_FLOW_ITEM_TYPE_UDP found");
                }

                tp_port = (const struct rte_flow_action_set_tp *)a->conf;
                fs->nat_lport = be16_to_cpu(tp_port->port);
                *nmode |= 1 << 3;
                break;
        default:
                /* We are not supposed to come here */
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "Action not supported");
        }

        return 0;
}

static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
                         const struct rte_flow_item items[],
                         const struct rte_flow_action action[],
                         struct rte_flow_error *e)
{
        struct ch_filter_specification *fs = &flow->fs;
        uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
        const struct rte_flow_action_queue *q;
        const struct rte_flow_action *a;
        char abit = 0;
        int ret;

        for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
                switch (a->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        q = (const struct rte_flow_action_queue *)a->conf;
                        if (!q)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "specify rx queue index");
                        if (check_rxq(flow->dev, q->index))
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "Invalid rx queue");
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_PASS;
                        fs->dirsteer = 1;
                        fs->iq = q->index;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        fs->hitcnts = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        nat_ipv4++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        nat_ipv6++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
                        /* We allow multiple switch actions, but switch is
                         * not compatible with either queue or drop
                         */
                        if (abit++ && fs->action != FILTER_SWITCH)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "overlapping action specified");
                        if (nat_ipv4 && nat_ipv6)
                                return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "Can't have one address ipv4 and the"
                                        " other ipv6");

                        ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
                                                        e);
                        if (ret)
                                return ret;
                        fs->action = FILTER_SWITCH;
                        break;
                default:
                        /* Not supported action : return error */
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  a, "Action not supported");
                }
        }

        if (ch_rte_parse_nat(nmode, fs))
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "invalid settings for switch action");
        return 0;
}

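/* Pattern-item parser dispatch table, indexed by RTE_FLOW_ITEM_TYPE_*.
 * dmask supplies the default mask applied when the application gives
 * a spec without a mask.
 */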
struct chrte_fparse parseitem[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .fptr  = ch_rte_parsetype_eth,
                .dmask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0xffff,
                }
        },

        [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
                .fptr = ch_rte_parsetype_port,
                .dmask = &(const struct rte_flow_item_phy_port){
                        .index = 0x7,
                }
        },

        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .fptr  = ch_rte_parsetype_ipv4,
                .dmask = &rte_flow_item_ipv4_mask,
        },

        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .fptr  = ch_rte_parsetype_ipv6,
                .dmask = &rte_flow_item_ipv6_mask,
        },

        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .fptr  = ch_rte_parsetype_udp,
                .dmask = &rte_flow_item_udp_mask,
        },

        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .fptr  = ch_rte_parsetype_tcp,
                .dmask = &rte_flow_item_tcp_mask,
        },
};

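/*
 * Walk the pattern items, dispatching each to its parser in
 * parseitem[] and rejecting repeated or unsupported items, then check
 * whether the resulting filter qualifies for the hash region.
 */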
static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
                       const struct rte_flow_item items[],
                       struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        const struct rte_flow_item *i;
        char repeat[ARRAY_SIZE(parseitem)] = {0};

        for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
                struct chrte_fparse *idx;
                int ret;

                if (i->type >= ARRAY_SIZE(parseitem))
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  i, "Item not supported");

                switch (i->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        continue;
                default:
                        /* check if item is repeated */
                        if (repeat[i->type])
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "parse items cannot be repeated (except void)");
                        repeat[i->type] = 1;

                        /* validate the item */
                        ret = cxgbe_validate_item(i, e);
                        if (ret)
                                return ret;

                        idx = &flow->item_parser[i->type];
                        if (!idx || !idx->fptr) {
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "Item not supported");
                        } else {
                                ret = idx->fptr(idx->dmask, i, &flow->fs, e);
                                if (ret)
                                        return ret;
                        }
                }
        }

        cxgbe_fill_filter_region(adap, &flow->fs);

        return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item item[],
                 const struct rte_flow_action action[],
                 struct rte_flow_error *e)
{
        int ret;
        /* parse user request into ch_filter_specification */
        ret = cxgbe_rtef_parse_attr(flow, attr, e);
        if (ret)
                return ret;
        ret = cxgbe_rtef_parse_items(flow, item, e);
        if (ret)
                return ret;
        return cxgbe_rtef_parse_actions(flow, item, action, e);
}

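/* Send the filter work request to firmware, wait for its completion,
 * and on success record the hardware filter entry in the flow.
 */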
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(dev);
        struct tid_info *t = &adap->tids;
        struct filter_ctx ctx;
        unsigned int fidx;
        int err;

        if (cxgbe_get_fidx(flow, &fidx))
                return -ENOMEM;
        if (cxgbe_verify_fidx(flow, fidx, 0))
                return -1;

        t4_init_completion(&ctx.completion);
        /* go create the filter */
        err = cxgbe_set_filter(dev, fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while creating filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter set operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while creating the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        if (fs->cap) { /* remember tid for later filter destroy */
                flow->fidx = ctx.tid;
                flow->f = lookup_tid(t, ctx.tid);
        } else {
                flow->fidx = fidx;
                flow->f = &adap->tids.ftid_tab[fidx];
        }

        return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item item[],
                  const struct rte_flow_action action[],
                  struct rte_flow_error *e)
{
        struct rte_flow *flow;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow) {
                rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to allocate memory for"
                                   " filter_entry");
                return NULL;
        }

        flow->item_parser = parseitem;
        flow->dev = dev;
        flow->fs.private = (void *)flow;

        if (cxgbe_flow_parse(flow, attr, item, action, e)) {
                t4_os_free(flow);
                return NULL;
        }

        /* go, interact with cxgbe_filter */
        ret = __cxgbe_flow_create(dev, flow);
        if (ret) {
                rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to create flow rule");
                t4_os_free(flow);
                return NULL;
        }

        flow->f->private = flow; /* Will be used during flush */

        return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct adapter *adap = ethdev2adap(dev);
        struct filter_entry *f = flow->f;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int err;

        fs = &f->fs;
        if (cxgbe_verify_fidx(flow, flow->fidx, 1))
                return -1;

        t4_init_completion(&ctx.completion);
        err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while deleting filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter delete operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while deleting the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        fs = &flow->fs;
        if (fs->mask.macidx) {
                struct port_info *pi = (struct port_info *)
                                        (dev->data->dev_private);
                int ret;

                ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
                if (!ret)
                        return ret;
        }

        return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_error *e)
{
        int ret;

        ret = __cxgbe_flow_destroy(dev, flow);
        if (ret)
                return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "error destroying filter.");
        t4_os_free(flow);
        return 0;
}

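/* Read the packet and byte hit counters for this filter from hardware. */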
static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
                              u64 *byte_count)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        struct ch_filter_specification fs = flow->f->fs;
        unsigned int fidx = flow->fidx;
        int ret = 0;

        ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
        if (ret)
                return ret;
        return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                 const struct rte_flow_action *action, void *data,
                 struct rte_flow_error *e)
{
        struct ch_filter_specification fs;
        struct rte_flow_query_count *c;
        struct filter_entry *f;
        int ret;

        RTE_SET_USED(dev);

        f = flow->f;
        fs = f->fs;

        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                return rte_flow_error_set(e, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "only count supported for query");

        /*
         * This is a valid operation, since we are allowed to do Chelsio
         * specific operations on the rte side of our code, but not
         * vice-versa.
         *
         * So, fs can be queried/modified here BUT rte_flow_query_count
         * cannot be worked on by the lower layer, since we want to keep
         * it rte_flow agnostic.
         */
        if (!fs.hitcnts)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                          &fs, "filter hit counters were not"
                                          " enabled during filter creation");

        c = (struct rte_flow_query_count *)data;
        ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
        if (ret)
                return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
                                          f, "cxgbe pmd failed to"
                                          " perform query");

        /* Query was successful */
        c->bytes_set = 1;
        c->hits_set = 1;

        return 0; /* success / partial_success */
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item item[],
                    const struct rte_flow_action action[],
                    struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        struct rte_flow *flow;
        unsigned int fidx;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow)
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "Unable to allocate memory for filter_entry");

        flow->item_parser = parseitem;
        flow->dev = dev;

        ret = cxgbe_flow_parse(flow, attr, item, action, e);
        if (ret) {
                t4_os_free(flow);
                return ret;
        }

        if (validate_filter(adap, &flow->fs)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "validation failed. Check f/w config file.");
        }

        if (cxgbe_get_fidx(flow, &fidx)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "no memory in tcam.");
        }

        if (cxgbe_verify_fidx(flow, fidx, 0)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "validation failed");
        }

        t4_os_free(flow);
        return 0;
}

/*
 * @ret : == 0 filter destroyed successfully
 *        == 1 filter not active / not found
 *         < 0 error destroying filter
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
                      struct rte_flow_error *e)
{
        if (f && (f->valid || f->pending) &&
            f->dev == dev && /* Only if user has asked for this port */
             f->private) /* We (rte_flow) created this filter */
                return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
                                          e);
        return 1;
}

static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        unsigned int i;
        int ret = 0;

        if (adap->tids.ftid_tab) {
                struct filter_entry *f = &adap->tids.ftid_tab[0];

                for (i = 0; i < adap->tids.nftids; i++, f++) {
                        ret = cxgbe_check_n_destroy(f, dev, e);
                        if (ret < 0)
                                goto out;
                }
        }

        if (is_hashfilter(adap) && adap->tids.tid_tab) {
                struct filter_entry *f;

                for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
                        f = (struct filter_entry *)adap->tids.tid_tab[i];

                        ret = cxgbe_check_n_destroy(f, dev, e);
                        if (ret < 0)
                                goto out;
                }
        }

out:
        return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
        .validate       = cxgbe_flow_validate,
        .create         = cxgbe_flow_create,
        .destroy        = cxgbe_flow_destroy,
        .flush          = cxgbe_flow_flush,
        .query          = cxgbe_flow_query,
        .isolate        = NULL,
};

int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                      enum rte_filter_type filter_type,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        int ret = 0;

        RTE_SET_USED(dev);
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &cxgbe_flow_ops;
                break;
        default:
                ret = -ENOTSUP;
                break;
        }
        return ret;
}