/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
5 #include "base/common.h"
6 #include "cxgbe_flow.h"

#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "Redefined match item with" \
					  " different values found"); \
	(fs)->val.elem = (__v); \
	(fs)->mask.elem = (__m); \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
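
/* Note: the short forms above assume the enclosing function has a
 * "struct ch_filter_specification *fs" and a "struct rte_flow_error *e"
 * in scope. For example, CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto) expands
 * to a guarded write of fs->val.proto and fs->mask.proto, failing with
 * EINVAL if a previous pattern item already set a different value.
 */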

static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* rte_flow specification does not allow it. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last or mask given without spec");
	/* We don't support range matching via "last".
	 * We could support "last" filled with zeros or last == spec,
	 * but that gives the user no additional functionality and only
	 * adds complexity on our side.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last is not supported by chelsio pmd");

	return 0;
}

/**
 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
 * there are only 40 bits available to store match fields.
 * So, to save space, optimize the filter spec for some common
 * known fields that hardware can parse against incoming
 * packets automatically.
 */
static void
cxgbe_tweak_filter_spec(struct adapter *adap,
			struct ch_filter_specification *fs)
{
	/* Save 16-bit ethertype field space, by setting corresponding
	 * 1-bit flags in the filter spec for common known ethertypes.
	 * When hardware sees these flags, it automatically infers and
	 * matches incoming packets against the corresponding ethertype.
	 */
	if (fs->mask.ethtype == 0xffff) {
		switch (fs->val.ethtype) {
		case RTE_ETHER_TYPE_IPV4:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV4;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_IPV6:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV6;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_VLAN:
			if (adap->params.tp.ethertype_shift < 0 &&
			    adap->params.tp.vlan_shift >= 0) {
				fs->val.ivlan_vld = 1;
				fs->mask.ivlan_vld = 1;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_QINQ:
			if (adap->params.tp.ethertype_shift < 0 &&
			    adap->params.tp.vnic_shift >= 0) {
				fs->val.ovlan_vld = 1;
				fs->mask.ovlan_vld = 1;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		default:
			break;
		}
	}
}
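
/* Illustration: a pattern matching ethertype 0x0800 with mask 0xffff is
 * folded into fs->type = FILTER_TYPE_IPV4 above (when the ethertype field
 * is not part of the compressed filter tuple), freeing the 16-bit
 * ethertype slot within the 40-bit match-field budget.
 */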

static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
			       tp->vlan_shift;
	if (tp->vnic_shift >= 0 && fs->mask.ovlan_vld)
		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ovlan) <<
			       tp->vnic_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}
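
/* Worked example (shift values depend on the TP configuration): with
 * tp->protocol_shift == 0 and tp->ethertype_shift == 8, a spec masking
 * proto with 0xff and ethtype with 0xffff builds
 * ntuple_mask = 0xff | ((u64)0xffff << 8); the spec is steered to the
 * hash region only when this equals tp->hash_filter_mask exactly.
 */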

static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *umask = item->mask;
	const struct rte_flow_item_eth *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

	if (!spec)
		return 0;

	/* we don't support SRC_MAC filtering */
	if (!rte_is_zero_ether_addr(&mask->src))
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "src mac filtering not supported");

	if (!rte_is_zero_ether_addr(&mask->dst)) {
		const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
		const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
		struct rte_flow *flow = (struct rte_flow *)fs->private;
		struct port_info *pi = (struct port_info *)
				       (flow->dev->data->dev_private);
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, addr, m);
		if (idx <= 0)
			return rte_flow_error_set(e, idx,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unable to allocate mac"
						  " entry in h/w");
		CXGBE_FILL_FS(idx, 0x1ff, macidx);
	}

	CXGBE_FILL_FS(be16_to_cpu(spec->type),
		      be16_to_cpu(mask->type), ethtype);

	return 0;
}

static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_phy_port *val = item->spec;
	const struct rte_flow_item_phy_port *umask = item->mask;
	const struct rte_flow_item_phy_port *mask;

	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

	if (val->index > 0x7)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "port index up to 0x7 is supported");

	CXGBE_FILL_FS(val->index, mask->index, iport);

	return 0;
}

static int
ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *umask = item->mask;
	const struct rte_flow_item_vlan *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;

	if (!fs->mask.ethtype)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Can't parse VLAN item without knowing ethertype");

	/* If ethertype is already set and is not VLAN (0x8100) or
	 * QINQ(0x88A8), then don't proceed further. Otherwise,
	 * reset the outer ethertype, so that it can be replaced by
	 * innermost ethertype. Note that hardware will automatically
	 * match against VLAN or QINQ packets, based on 'ivlan_vld' or
	 * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
	 */
	if (fs->mask.ethtype) {
		if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
		    fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Ethertype must be 0x8100 or 0x88a8");
	}

	if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
		CXGBE_FILL_FS(1, 1, ovlan_vld);
		if (spec) {
			CXGBE_FILL_FS(be16_to_cpu(spec->tci),
				      be16_to_cpu(mask->tci), ovlan);

			fs->mask.ethtype = 0;
			fs->val.ethtype = 0;
		}
	} else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
		CXGBE_FILL_FS(1, 1, ivlan_vld);
		if (spec) {
			CXGBE_FILL_FS(be16_to_cpu(spec->tci),
				      be16_to_cpu(mask->tci), ivlan);

			fs->mask.ethtype = 0;
			fs->val.ethtype = 0;
		}
	}

	if (spec)
		CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
			      be16_to_cpu(mask->inner_type), ethtype);

	return 0;
}
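
/* Illustration: "eth type is 0x88a8 / vlan inner_type is 0x8100 / vlan"
 * sets ovlan_vld from the first VLAN item and replaces the ethertype with
 * the inner 0x8100, so the second VLAN item then sets ivlan_vld. This is
 * also why VLAN is the only item allowed to repeat in
 * cxgbe_rtef_parse_items() below.
 */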

static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);
	return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);
	return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl/tos are not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv4 ethertype");
	fs->type = FILTER_TYPE_IPV4;
	if (!val)
		return 0; /* ipv4 wild card */

	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	if (mask->hdr.vtc_flow ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tc/flow/hop are not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv6 ethertype");
	fs->type = FILTER_TYPE_IPV6;
	if (!val)
		return 0; /* ipv6 wild card */

	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported !");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported.");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	if (rxq > pi->n_rx_qsets)
		return -EINVAL;
	return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;
	u8 nentries;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %d.\n", fidx);
		return -EINVAL;
	}

	nentries = cxgbe_filter_slots(adap, fs.type);
	if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	u8 nentries;

	nentries = cxgbe_filter_slots(adap, fs->type);
	if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "filter index: %d is busy.\n", fidx);
		return -EBUSY;
	}

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */
	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		     cxgbe_validate_fidxonadd(&flow->fs,
					      ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For tcam get the next available slot, if default value specified */
	if (flow->fidx == FILTER_ID_MAX) {
		u8 nentries;
		int idx;

		nentries = cxgbe_filter_slots(adap, fs->type);
		idx = cxgbe_alloc_ftid(adap, nentries);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}

static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
	const struct rte_flow_item *i;
	int j, index = -ENOENT;

	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
		if (i->type == type) {
			index = j;
			break;
		}
	}

	return index;
}

static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
	/* nmode:
	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
	 * BIT_2 = [src_port], BIT_3 = [dst_port]
	 *
	 * Only below cases are supported as per our spec.
	 */
	switch (nmode) {
	case 0:  /* 0000b */
		fs->nat_mode = NAT_MODE_NONE;
		break;
	case 2:  /* 0010b */
		fs->nat_mode = NAT_MODE_DIP;
		break;
	case 5:  /* 0101b */
		fs->nat_mode = NAT_MODE_SIP_SP;
		break;
	case 7:  /* 0111b */
		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
		break;
	case 10: /* 1010b */
		fs->nat_mode = NAT_MODE_DIP_DP;
		break;
	case 11: /* 1011b */
		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
		break;
	case 14: /* 1110b */
		fs->nat_mode = NAT_MODE_DIP_DP_SP;
		break;
	case 15: /* 1111b */
		fs->nat_mode = NAT_MODE_ALL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
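
/* Example: SET_IPV4_DST sets BIT_1 and SET_TP_DST sets BIT_3, so a rule
 * rewriting only the destination address and destination port yields
 * nmode = 1010b (10), which maps to NAT_MODE_DIP_DP above.
 */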

static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  const struct rte_flow_item items[],
			  uint8_t *nmode,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_of_set_vlan_vid *vlanid;
	const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
	const struct rte_flow_action_of_push_vlan *pushvlan;
	const struct rte_flow_action_set_ipv4 *ipv4;
	const struct rte_flow_action_set_ipv6 *ipv6;
	const struct rte_flow_action_set_tp *tp_port;
	const struct rte_flow_action_phy_port *port;
	int item_index;
	u16 tmp_vlan;

	switch (a->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
			 a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xe000;
		fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
			  a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xfff;
		fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		pushvlan = (const struct rte_flow_action_of_push_vlan *)
			   a->conf;
		if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "only ethertype 0x8100 "
						  "supported for push vlan.");
		fs->newvlan = VLAN_INSERT;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		fs->newvlan = VLAN_REMOVE;
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		port = (const struct rte_flow_action_phy_port *)a->conf;
		fs->eport = port->index;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_fport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 2;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_lport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 3;
		break;
	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		fs->swapmac = 1;
		break;
	default:
		/* We are not supposed to come here */
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");
	}

	return 0;
}

static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
	uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	char abit = 0;
	int ret;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			q = (const struct rte_flow_action_queue *)a->conf;
			if (!q)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"specify rx queue index");
			if (check_rxq(flow->dev, q->index))
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"Invalid rx queue");
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_PASS;
			fs->iq = q->index;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			vlan_set_vid++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			vlan_set_pcp++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			nat_ipv4++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			nat_ipv6++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
			/* We allow multiple switch actions, but switch is
			 * not compatible with either queue or drop.
			 */
			if (abit++ && fs->action != FILTER_SWITCH)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"overlapping action specified");
			if (nat_ipv4 && nat_ipv6)
				return rte_flow_error_set(e, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, a,
					"Can't have one address ipv4 and the"
					" other ipv6");

			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
							e);
			if (ret)
				return ret;
			fs->action = FILTER_SWITCH;
			break;
		default:
			/* Not supported action: return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}

	if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Both OF_SET_VLAN_VID and "
					  "OF_SET_VLAN_PCP must be specified");

	if (ch_rte_parse_nat(nmode, fs))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "invalid settings for switch action");

	return 0;
}

static struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.fptr = ch_rte_parsetype_eth,
		.dmask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
		.fptr = ch_rte_parsetype_port,
		.dmask = &(const struct rte_flow_item_phy_port){
			.index = 0x7,
		}
	},

	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.fptr = ch_rte_parsetype_vlan,
		.dmask = &(const struct rte_flow_item_vlan){
			.tci = 0xffff,
			.inner_type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr = ch_rte_parsetype_ipv4,
		.dmask = &rte_flow_item_ipv4_mask,
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr = ch_rte_parsetype_ipv6,
		.dmask = &rte_flow_item_ipv6_mask,
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};

static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx;
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type] &&
			    i->type != RTE_FLOW_ITEM_TYPE_VLAN)
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated(except void/vlan)");

			repeat[i->type] = 1;

			/* No spec found for this pattern item. Skip it. */
			if (!i->spec)
				break;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			idx = &flow->item_parser[i->type];
			if (!idx || !idx->fptr)
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");

			ret = idx->fptr(idx->dmask, i, &flow->fs, e);
			if (ret)
				return ret;
		}
	}

	cxgbe_fill_filter_region(adap, &flow->fs);
	cxgbe_tweak_filter_spec(adap, &flow->fs);

	return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;

	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;
	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;
	return cxgbe_rtef_parse_actions(flow, item, action, e);
}

static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* to destroy the filter */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	t4_os_lock(&adap->flow_lock);
	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->mask.macidx) {
		struct port_info *pi = (struct port_info *)
				       (dev->data->dev_private);
		int ret;

		/* cxgbe_mpstcam_remove() returns 0 on failure */
		ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
		if (!ret)
			return -EIO;
	}

	return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	int ret;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_destroy(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");
	t4_os_free(flow);
	return 0;
}

static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs = flow->f->fs;
	unsigned int fidx = flow->fidx;
	int ret;

	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
	if (ret)
		return ret;
	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/*
	 * This is a valid operation, since we are allowed to do chelsio
	 * specific operations on the rte side of our code, but not vice-versa.
	 *
	 * So, fs can be queried/modified here BUT rte_flow_query_count
	 * cannot be worked on by the lower layer, since we want to maintain
	 * it as rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret) {
		rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
				   f, "cxgbe pmd failed to perform query");
		goto out;
	}

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;
	if (c->reset)
		cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);

out:
	t4_os_unlock(&adap->flow_lock);
	return ret;
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret = 0;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (cxgbe_validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	t4_os_lock(&adap->flow_lock);
	if (cxgbe_get_fidx(flow, &fidx)) {
		ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "no memory in tcam.");
		goto out;
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "validation failed");
		goto out;
	}

out:
	t4_os_unlock(&adap->flow_lock);
	t4_os_free(flow);
	return ret;
}

/**
 * @ret : > 0 filter destroyed successfully
 *        < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	    f->private) /* We (rte_flow) created this filter */
		return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);

	return 1;
}

static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	t4_os_lock(&adap->flow_lock);
	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying TCAM "
						   "filter.");
				goto out;
			}
		}
	}

	if (is_hashfilter(adap) && adap->tids.tid_tab) {
		struct filter_entry *f;

		for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
			f = (struct filter_entry *)adap->tids.tid_tab[i];

			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying HASH "
						   "filter.");
				goto out;
			}
		}
	}

out:
	t4_os_unlock(&adap->flow_lock);
	return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate = cxgbe_flow_validate,
	.create = cxgbe_flow_create,
	.destroy = cxgbe_flow_destroy,
	.flush = cxgbe_flow_flush,
	.query = cxgbe_flow_query,
};

int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}
	return ret;
}