/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "common.h"
#include "cxgbe_flow.h"

#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if (!((fs)->val.elem || (fs)->mask.elem)) { \
		(fs)->val.elem = (__v); \
		(fs)->mask.elem = (__m); \
	} else { \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "a filter can be specified" \
					  " only once"); \
	} \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

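/*
 * Usage sketch (illustrative only): the CXGBE_FILL_FS() wrappers rely on
 * locals named "fs" and "e" being in scope at the call site, and reject a
 * second write to the same field. For example,
 * CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto) expands roughly to:
 *
 *	if (!(fs->val.proto || fs->mask.proto)) {
 *		fs->val.proto = IPPROTO_UDP;
 *		fs->mask.proto = 0xff;
 *	} else {
 *		return rte_flow_error_set(e, EINVAL,
 *					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
 *					  "a filter can be specified"
 *					  " only once");
 *	}
 */
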
static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* rte_flow specification does not allow it. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last or mask given without spec");
	/*
	 * We don't support it.
	 * Although we could support values in last as 0's or last == spec,
	 * that would not provide the user with any additional functionality
	 * and would only increase the complexity for us.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last is not supported by chelsio pmd");

	return 0;
}

static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	/* Hash (exact-match) filters require fully-specified tuples. */
	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		/* IPv6: both addresses must be set with full masks. */
		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton = 0xffffffff;
		uint32_t bitoff = 0x0U;

		/* IPv4: both addresses must be set with full masks. */
		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}

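/*
 * Worked example (illustrative; assumes a firmware filterMask selecting
 * protocol, ethertype, port and macmatch): a fully-specified IPv4 4-tuple
 * rule with fs->mask.proto = 0xff, fs->mask.ethtype = 0xffff,
 * fs->mask.iport = 0x7 and fs->mask.macidx = 0x1ff shifts each mask into
 * its TP field position and compares the result against
 * tp->hash_filter_mask. Only on an exact match is the rule placed in the
 * hash (exact-match) region; otherwise it stays in the LE-TCAM region.
 */
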
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *umask = item->mask;
	const struct rte_flow_item_eth *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

	/* we don't support SRC_MAC filtering */
	if (!is_zero_ether_addr(&mask->src))
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "src mac filtering not supported");

	if (!is_zero_ether_addr(&mask->dst)) {
		const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
		const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
		struct rte_flow *flow = (struct rte_flow *)fs->private;
		struct port_info *pi = (struct port_info *)
					(flow->dev->data->dev_private);
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, addr, m);
		if (idx <= 0)
			return rte_flow_error_set(e, idx,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unable to allocate mac"
						  " entry in h/w");
		CXGBE_FILL_FS(idx, 0x1ff, macidx);
	}

	CXGBE_FILL_FS(be16_to_cpu(spec->type),
		      be16_to_cpu(mask->type), ethtype);

	return 0;
}

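/*
 * Example (illustrative): a destination MAC match consumes one of the MPS
 * TCAM entries allocated above. With testpmd, such a rule could be
 * created as:
 *
 *	flow create 0 ingress pattern eth dst is 02:00:00:00:00:01 / end
 *		actions queue index 0 / end
 */
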
static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_phy_port *val = item->spec;
	const struct rte_flow_item_phy_port *umask = item->mask;
	const struct rte_flow_item_phy_port *mask;

	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

	if (val->index > 0x7)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "port index up to 0x7 is supported");

	CXGBE_FILL_FS(val->index, mask->index, iport);

	return 0;
}

static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;

	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;

	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

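/*
 * Example (illustrative): a full IPv4/TCP 4-tuple match with testpmd:
 *
 *	flow create 0 ingress pattern eth / ipv4 src is 1.1.1.1
 *		dst is 2.2.2.2 / tcp src is 100 dst is 200 / end
 *		actions queue index 1 / end
 *
 * Note the naming convention: the packet's source maps to the "foreign"
 * fields (fip/fport) and its destination to the "local" fields
 * (lip/lport).
 */
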
static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl/tos are not supported");

	fs->type = FILTER_TYPE_IPV4;
	CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv4 wild card */

	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	if (mask->hdr.vtc_flow ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tc/flow/hop are not supported");

	fs->type = FILTER_TYPE_IPV6;
	CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv6 wild card */

	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported !");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported.");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}

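/*
 * Illustrative note: attr->priority doubles as a 1-based LE-TCAM index,
 * so priority 8 pins the rule to filter slot 7, while priority 0 leaves
 * fidx at the FILTER_ID_MAX sentinel and lets cxgbe_get_fidx() pick the
 * next free slot.
 */
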
static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	if (rxq > pi->n_rx_qsets)
		return -EINVAL;

	return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %d.\n", fidx);
		return -EINVAL;
	}

	if (!is_filter_set(&adap->tids, fidx, fs.type)) {
		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	if (is_filter_set(&adap->tids, fidx, fs->type)) {
		dev_err(adap, "filter index: %d is busy.\n", fidx);
		return -EBUSY;
	}

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */

	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		     cxgbe_validate_fidxonadd(&flow->fs,
					      ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For tcam get the next available slot, if default value specified */
	if (flow->fidx == FILTER_ID_MAX) {
		int idx;

		idx = cxgbe_alloc_ftid(adap, fs->type);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}

static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
	const struct rte_flow_item *i;
	int j, index = -ENOENT;

	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
		if (i->type == type) {
			index = j;
			break;
		}
	}

	return index;
}

static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
	/* nmode:
	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
	 * BIT_2 = [src_port], BIT_3 = [dst_port]
	 *
	 * Only below cases are supported as per our spec.
	 */
	switch (nmode) {
	case 0:  /* 0000b */
		fs->nat_mode = NAT_MODE_NONE;
		break;
	case 2:  /* 0010b */
		fs->nat_mode = NAT_MODE_DIP;
		break;
	case 5:  /* 0101b */
		fs->nat_mode = NAT_MODE_SIP_SP;
		break;
	case 7:  /* 0111b */
		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
		break;
	case 10: /* 1010b */
		fs->nat_mode = NAT_MODE_DIP_DP;
		break;
	case 11: /* 1011b */
		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
		break;
	case 14: /* 1110b */
		fs->nat_mode = NAT_MODE_DIP_DP_SP;
		break;
	case 15: /* 1111b */
		fs->nat_mode = NAT_MODE_ALL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

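/*
 * Worked example (illustrative): a rule carrying both
 * RTE_FLOW_ACTION_TYPE_SET_IPV4_DST (BIT_1) and
 * RTE_FLOW_ACTION_TYPE_SET_TP_DST (BIT_3) yields nmode = 1010b = 10,
 * which maps to NAT_MODE_DIP_DP above. An unsupported combination, such
 * as rewriting only the source IP (0001b), returns -EINVAL.
 */
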
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  const struct rte_flow_item items[],
			  uint8_t *nmode,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_of_set_vlan_vid *vlanid;
	const struct rte_flow_action_of_push_vlan *pushvlan;
	const struct rte_flow_action_set_ipv4 *ipv4;
	const struct rte_flow_action_set_ipv6 *ipv6;
	const struct rte_flow_action_set_tp *tp_port;
	const struct rte_flow_action_phy_port *port;
	int item_index;

	switch (a->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
			 a->conf;
		fs->newvlan = VLAN_REWRITE;
		fs->vlan = vlanid->vlan_vid;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		pushvlan = (const struct rte_flow_action_of_push_vlan *)
			   a->conf;
		if (pushvlan->ethertype != ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "only ethertype 0x8100 "
						  "supported for push vlan.");
		fs->newvlan = VLAN_INSERT;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		fs->newvlan = VLAN_REMOVE;
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		port = (const struct rte_flow_action_phy_port *)a->conf;
		fs->eport = port->index;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_fport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 2;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_lport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 3;
		break;
	default:
		/* We are not supposed to come here */
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");
	}

	return 0;
}

static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	char abit = 0;
	int ret;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			q = (const struct rte_flow_action_queue *)a->conf;
			if (!q)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"specify rx queue index");
			if (check_rxq(flow->dev, q->index))
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"Invalid rx queue");
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = q->index;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			nat_ipv4++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			nat_ipv6++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
			/* We allow multiple switch actions, but switch is
			 * not compatible with either queue or drop
			 */
			if (abit++ && fs->action != FILTER_SWITCH)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"overlapping action specified");
			if (nat_ipv4 && nat_ipv6)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"Can't have one address ipv4 and the"
						" other ipv6");

			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
							e);
			if (ret)
				return ret;
			fs->action = FILTER_SWITCH;
			break;
		default:
			/* Not supported action : return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}

	if (ch_rte_parse_nat(nmode, fs))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "invalid settings for switch action");

	return 0;
}

struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.fptr  = ch_rte_parsetype_eth,
		.dmask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
		.fptr = ch_rte_parsetype_port,
		.dmask = &(const struct rte_flow_item_phy_port){
			.index = 0x7,
		}
	},

	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr  = ch_rte_parsetype_ipv4,
		.dmask = &rte_flow_item_ipv4_mask,
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr  = ch_rte_parsetype_ipv6,
		.dmask = &rte_flow_item_ipv6_mask,
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr  = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr  = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};

static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx;
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type])
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated (except void)");
			repeat[i->type] = 1;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			idx = &flow->item_parser[i->type];
			if (!idx || !idx->fptr) {
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");
			} else {
				ret = idx->fptr(idx->dmask, i, &flow->fs, e);
				if (ret)
					return ret;
			}
		}
	}

	cxgbe_fill_filter_region(adap, &flow->fs);

	return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;

	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;
	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;

	return cxgbe_rtef_parse_actions(flow, item, action, e);
}

static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_US,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* to destroy the filter */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_US,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	/* Release the MPS TCAM entry, if one was claimed for mac match. */
	if (fs->mask.macidx) {
		struct port_info *pi = (struct port_info *)
					(dev->data->dev_private);
		int ret;

		ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
		if (!ret)
			return ret;
	}

	return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	int ret;

	ret = __cxgbe_flow_destroy(dev, flow);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");
	t4_os_free(flow);

	return 0;
}

static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs = flow->f->fs;
	unsigned int fidx = flow->fidx;
	int ret = 0;

	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
	if (ret)
		return ret;

	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/*
	 * This is a valid operation, since we are allowed to do chelsio
	 * specific operations in rte side of our code but not vice versa.
	 *
	 * So, fs can be queried/modified here BUT rte_flow_query_count
	 * cannot be worked on by the lower layer since we want to maintain
	 * it as rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret)
		return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
					  f, "cxgbe pmd failed to"
					  " perform query");

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;

	return 0; /* success / partial_success */
}

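/*
 * Illustrative usage (hypothetical application code): reading the
 * counters exposed by this op through the generic rte_flow API:
 *
 *	struct rte_flow_query_count cnt = { 0 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (!rte_flow_query(port_id, flow, &count, &cnt, &err))
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       cnt.hits, cnt.bytes);
 */
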
static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	if (cxgbe_get_fidx(flow, &fidx)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "no memory in tcam.");
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "validation failed");
	}

	t4_os_free(flow);
	return 0;
}

/*
 * @ret : == 0 filter destroyed successfully
 *         < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
		      struct rte_flow_error *e)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	    f->private) /* We (rte_flow) created this filter */
		return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
					  e);

	return 1;
}

static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	/* Flush the LE-TCAM (maskful) filters owned by this port. */
	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev, e);
			if (ret < 0)
				goto out;
		}
	}

	/* Flush the hash (maskless) filters; valid hash tids live in
	 * [hash_base, ntids), so iterating with <= would overrun tid_tab.
	 */
	if (is_hashfilter(adap) && adap->tids.tid_tab) {
		struct filter_entry *f;

		for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
			f = (struct filter_entry *)adap->tids.tid_tab[i];
			if (!f)
				continue;

			ret = cxgbe_check_n_destroy(f, dev, e);
			if (ret < 0)
				goto out;
		}
	}

out:
	return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate = cxgbe_flow_validate,
	.create = cxgbe_flow_create,
	.destroy = cxgbe_flow_destroy,
	.flush = cxgbe_flow_flush,
	.query = cxgbe_flow_query,
};

int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}

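/*
 * Illustrative usage: applications normally reach cxgbe_flow_ops
 * indirectly via rte_flow_validate()/rte_flow_create(), which perform
 * this RTE_ETH_FILTER_GENERIC lookup internally, e.g.:
 *
 *	const struct rte_flow_ops *ops;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 */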