/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "base/common.h"
#include "cxgbe_flow.h"
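
/* Helpers to fill the val/mask pair of a match field in the filter
 * specification. A pattern item may touch a field that an earlier item
 * already set; that is rejected unless both request the same value.
 */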
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "Redefined match item with" \
					  " different values found"); \
	(fs)->val.elem = (__v); \
	(fs)->mask.elem = (__m); \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* rte_flow specification does not allow it. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last or mask given without spec");

	/* We don't support range matching via "last".
	 * Although we could accept "last" values that are all 0's or equal
	 * to spec, that would not provide the user with any additional
	 * functionality and would only increase the complexity for us.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last is not supported by chelsio pmd");

	return 0;
}
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}
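
/* Matching on destination MAC consumes an MPS TCAM entry, which is
 * allocated here and referenced by its index (macidx) in the filter.
 */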
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *umask = item->mask;
	const struct rte_flow_item_eth *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

	/* we don't support SRC_MAC filtering */
	if (!rte_is_zero_ether_addr(&mask->src))
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "src mac filtering not supported");

	if (!rte_is_zero_ether_addr(&mask->dst)) {
		const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
		const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
		struct rte_flow *flow = (struct rte_flow *)fs->private;
		struct port_info *pi = (struct port_info *)
					(flow->dev->data->dev_private);
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, addr, m);
		if (idx <= 0)
			return rte_flow_error_set(e, idx,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unable to allocate mac"
						  " entry in h/w");
		CXGBE_FILL_FS(idx, 0x1ff, macidx);
	}

	CXGBE_FILL_FS(be16_to_cpu(spec->type),
		      be16_to_cpu(mask->type), ethtype);

	return 0;
}

static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_phy_port *val = item->spec;
	const struct rte_flow_item_phy_port *umask = item->mask;
	const struct rte_flow_item_phy_port *mask;

	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

	if (val->index > 0x7)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "port index up to 0x7 is supported");

	CXGBE_FILL_FS(val->index, mask->index, iport);

	return 0;
}
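
/* Note on naming: in the chelsio filter tuple, "lip"/"lport" are the
 * local (DUT) address/port, i.e. the destination of an ingress packet,
 * while "fip"/"fport" are the foreign (remote) source address/port.
 */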
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl/tos are not supported");

	fs->type = FILTER_TYPE_IPV4;
	CXGBE_FILL_FS(RTE_ETHER_TYPE_IPV4, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv4 wild card */

	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	if (mask->hdr.vtc_flow ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tc/flow/hop are not supported");

	fs->type = FILTER_TYPE_IPV6;
	CXGBE_FILL_FS(RTE_ETHER_TYPE_IPV6, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv6 wild card */

	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported !");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported.");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	if (rxq > pi->n_rx_qsets)
		return -EINVAL;
	return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;
	u8 nentries;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %d.\n", fidx);
		return -ENOENT;
	}

	nentries = cxgbe_filter_slots(adap, fs.type);
	if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	u8 nentries;

	nentries = cxgbe_filter_slots(adap, fs->type);
	if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "filter index: %d is busy.\n", fidx);
		return -EBUSY;
	}

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */
	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		     cxgbe_validate_fidxonadd(&flow->fs,
					      ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For tcam get the next available slot, if default value specified */
	if (flow->fidx == FILTER_ID_MAX) {
		u8 nentries;
		int idx;

		nentries = cxgbe_filter_slots(adap, fs->type);
		idx = cxgbe_alloc_ftid(adap, nentries);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}

static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
	const struct rte_flow_item *i;
	int j, index = -ENOENT;

	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
		if (i->type == type) {
			index = j;
			break;
		}
	}

	return index;
}
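
/* Example: actions "set_ipv4_dst + set_tp_dst" accumulate
 * nmode = BIT(1) | BIT(3) = 1010b, which maps to NAT_MODE_DIP_DP
 * below (rewrite destination IP and destination port).
 */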
static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
	/* nmode:
	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
	 * BIT_2 = [src_port], BIT_3 = [dst_port]
	 *
	 * Only below cases are supported as per our spec.
	 */
	switch (nmode) {
	case 0:  /* 0000b */
		fs->nat_mode = NAT_MODE_NONE;
		break;
	case 2:  /* 0010b */
		fs->nat_mode = NAT_MODE_DIP;
		break;
	case 5:  /* 0101b */
		fs->nat_mode = NAT_MODE_SIP_SP;
		break;
	case 7:  /* 0111b */
		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
		break;
	case 10: /* 1010b */
		fs->nat_mode = NAT_MODE_DIP_DP;
		break;
	case 11: /* 1011b */
		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
		break;
	case 14: /* 1110b */
		fs->nat_mode = NAT_MODE_DIP_DP_SP;
		break;
	case 15: /* 1111b */
		fs->nat_mode = NAT_MODE_ALL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
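
/* Translate a single switch-related action into filter specification
 * fields. NAT rewrites additionally require the corresponding pattern
 * item (e.g. set_ipv4_src needs an ipv4 item) so the rewrite has a
 * fully specified header to operate on; *nmode collects which of the
 * 4-tuple fields are being rewritten.
 */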
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  const struct rte_flow_item items[],
			  uint8_t *nmode,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_of_set_vlan_vid *vlanid;
	const struct rte_flow_action_of_push_vlan *pushvlan;
	const struct rte_flow_action_set_ipv4 *ipv4;
	const struct rte_flow_action_set_ipv6 *ipv6;
	const struct rte_flow_action_set_tp *tp_port;
	const struct rte_flow_action_phy_port *port;
	int item_index;

	switch (a->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
			  a->conf;
		fs->newvlan = VLAN_REWRITE;
		fs->vlan = vlanid->vlan_vid;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		pushvlan = (const struct rte_flow_action_of_push_vlan *)
			    a->conf;
		if (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "only ethertype 0x8100 "
						  "supported for push vlan.");
		fs->newvlan = VLAN_INSERT;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		fs->newvlan = VLAN_REMOVE;
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		port = (const struct rte_flow_action_phy_port *)a->conf;
		fs->eport = port->index;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_fport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 2;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_lport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 3;
		break;
	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		fs->swapmac = 1;
		break;
	default:
		/* We are not supposed to come here */
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");
	}

	return 0;
}
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	char abit = 0;
	int ret;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			q = (const struct rte_flow_action_queue *)a->conf;
			if (!q)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"specify rx queue index");
			if (check_rxq(flow->dev, q->index))
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"Invalid rx queue");
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = q->index;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			nat_ipv4++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			nat_ipv6++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
			/* We allow multiple switch actions, but switch is
			 * not compatible with either queue or drop
			 */
			if (abit++ && fs->action != FILTER_SWITCH)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"overlapping action specified");
			if (nat_ipv4 && nat_ipv6)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"Can't have one address ipv4 and the"
						" other ipv6");

			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
							e);
			if (ret)
				return ret;
			fs->action = FILTER_SWITCH;
			break;
		default:
			/* Not supported action : return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}

	if (ch_rte_parse_nat(nmode, fs))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "invalid settings for switch action");

	return 0;
}
static struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.fptr  = ch_rte_parsetype_eth,
		.dmask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
		.fptr = ch_rte_parsetype_port,
		.dmask = &(const struct rte_flow_item_phy_port){
			.index = 0x7,
		}
	},

	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr  = ch_rte_parsetype_ipv4,
		.dmask = &rte_flow_item_ipv4_mask,
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr  = ch_rte_parsetype_ipv6,
		.dmask = &rte_flow_item_ipv6_mask,
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr  = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr  = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};

static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx;
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type])
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated (except void)");
			repeat[i->type] = 1;

			/* No spec found for this pattern item. Skip it */
			if (!i->spec)
				break;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			idx = &flow->item_parser[i->type];
			if (!idx || !idx->fptr) {
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");
			}

			ret = idx->fptr(idx->dmask, i, &flow->fs, e);
			if (ret)
				return ret;
			break;
		}
	}

	cxgbe_fill_filter_region(adap, &flow->fs);

	return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;

	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;
	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;
	return cxgbe_rtef_parse_actions(flow, item, action, e);
}
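
/* Create the filter in hardware: issue the work request and poll the
 * firmware event queue for its completion. Hash-region filters get
 * their final index (tid) back from hardware; TCAM filters keep the
 * slot chosen in cxgbe_get_fidx().
 */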
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* to destroy the filter */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}
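
/* Tear down a filter previously created through this API, waiting for
 * firmware to confirm the delete before releasing associated state.
 */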
static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	fs = &flow->fs;
	if (fs->mask.macidx) {
		struct port_info *pi = (struct port_info *)
					(dev->data->dev_private);
		int ret;

		/* cxgbe_mpstcam_remove() returns nonzero on success */
		ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
		if (!ret)
			return -EIO;
	}

	return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	int ret;

	ret = __cxgbe_flow_destroy(dev, flow);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");
	t4_os_free(flow);
	return 0;
}

static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs = flow->f->fs;
	unsigned int fidx = flow->fidx;
	int ret = 0;

	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
	if (ret)
		return ret;
	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/* Touching fs here is a valid operation, since we are allowed to do
	 * chelsio-specific operations in the rte side of our code, but not
	 * vice versa.
	 *
	 * So, fs can be queried/modified here BUT rte_flow_query_count
	 * cannot be worked on by the lower layer, since we want to maintain
	 * it as rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret)
		return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
					  f, "cxgbe pmd failed to"
					  " perform query");

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;
	if (c->reset)
		cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);

	return 0; /* success / partial_success */
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret = 0;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (cxgbe_validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	if (cxgbe_get_fidx(flow, &fidx)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "no memory in tcam.");
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "validation failed");
	}

	t4_os_free(flow);
	return 0;
}

/*
 * @ret : == 0 filter destroyed successfully
 *         < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
		      struct rte_flow_error *e)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	    f->private) /* We (rte_flow) created this filter */
		return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
					  e);
	return 1;
}
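
/* Flush walks both filter regions: the LE-TCAM (ftid_tab) and, on hash
 * filter capable adapters, the hash region (tid_tab), destroying only
 * entries this rte_flow layer created on the given port.
 */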
static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev, e);
			if (ret < 0)
				goto out;
		}
	}

	if (is_hashfilter(adap) && adap->tids.tid_tab) {
		struct filter_entry *f;

		for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
			f = (struct filter_entry *)adap->tids.tid_tab[i];

			ret = cxgbe_check_n_destroy(f, dev, e);
			if (ret < 0)
				goto out;
		}
	}

out:
	return ret >= 0 ? 0 : ret;
}
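
/* Illustrative testpmd rule exercising this path (assuming the adapter
 * and firmware filter configuration support the requested match):
 *   flow create 0 ingress pattern eth / ipv4 dst is 10.0.0.1 /
 *        tcp dst is 80 / end actions queue index 1 / count / end
 */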
static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate	= cxgbe_flow_validate,
	.create		= cxgbe_flow_create,
	.destroy	= cxgbe_flow_destroy,
	.flush		= cxgbe_flow_flush,
	.query		= cxgbe_flow_query,
};

int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}
	return ret;
}