1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
5 #include "base/common.h"
6 #include "cxgbe_flow.h"
8 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
10 if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
11 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
12 NULL, "Redefined match item with" \
13 " different values found"); \
14 (fs)->val.elem = (__v); \
15 (fs)->mask.elem = (__m); \
18 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
20 memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
21 memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
24 #define CXGBE_FILL_FS(v, m, elem) \
25 __CXGBE_FILL_FS(v, m, fs, elem, e)
27 #define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
28 __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
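/* Illustrative usage sketch (hypothetical values, not part of the
 * driver flow): a parser holding a 'fs' filter spec and an 'e' error
 * pointer fills an exact TCP destination-port match as:
 *
 *	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
 *	CXGBE_FILL_FS(80, 0xffff, lport);
 *
 * Each invocation updates both fs->val.<elem> and fs->mask.<elem>, and
 * returns -EINVAL from the calling function if a previous item already
 * set the same field to a different value.
 */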
31 cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
33 /* The rte_flow specification does not allow 'mask' or 'last' without 'spec'. */
34 if (!i->spec && (i->mask || i->last))
35 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
36 i, "last or mask given without spec");
38 * We don't support range matching via 'last'.
39 * We could accept a 'last' that is all zeroes or equal to 'spec',
40 * but that would give the user no additional functionality and
41 * would only increase the complexity on our side.
44 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
45 i, "last is not supported by chelsio pmd");
50 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
51 * there are only 40 bits available to store match fields.
52 * So, to save space, optimize filter spec for some common
53 * known fields that hardware can parse against incoming
54 * packets automatically.
57 cxgbe_tweak_filter_spec(struct adapter *adap,
58 struct ch_filter_specification *fs)
60 /* Save the 16-bit ethertype field space by setting corresponding
61 * 1-bit flags in the filter spec for common known ethertypes.
62 * When hardware sees these flags, it automatically infers and
63 * matches incoming packets against the corresponding ethertype.
65 if (fs->mask.ethtype == 0xffff) {
66 switch (fs->val.ethtype) {
67 case RTE_ETHER_TYPE_IPV4:
68 if (adap->params.tp.ethertype_shift < 0) {
69 fs->type = FILTER_TYPE_IPV4;
74 case RTE_ETHER_TYPE_IPV6:
75 if (adap->params.tp.ethertype_shift < 0) {
76 fs->type = FILTER_TYPE_IPV6;
81 case RTE_ETHER_TYPE_VLAN:
82 if (adap->params.tp.ethertype_shift < 0 &&
83 adap->params.tp.vlan_shift >= 0) {
84 fs->val.ivlan_vld = 1;
85 fs->mask.ivlan_vld = 1;
90 case RTE_ETHER_TYPE_QINQ:
91 if (adap->params.tp.ethertype_shift < 0 &&
92 adap->params.tp.vnic_shift >= 0) {
93 fs->val.ovlan_vld = 1;
94 fs->mask.ovlan_vld = 1;
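/* Sketch of the effect of the folding above (assuming
 * adap->params.tp.ethertype_shift < 0): a rule with an exact ethertype
 * match of RTE_ETHER_TYPE_IPV4 (fs->val.ethtype = 0x0800,
 * fs->mask.ethtype = 0xffff) is reduced to the 1-bit
 * fs->type = FILTER_TYPE_IPV4 flag, so the 16-bit ethertype field no
 * longer needs to be carried in the limited match-field space.
 */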
106 cxgbe_fill_filter_region(struct adapter *adap,
107 struct ch_filter_specification *fs)
109 struct tp_params *tp = &adap->params.tp;
110 u64 hash_filter_mask = tp->hash_filter_mask;
115 if (!is_hashfilter(adap))
119 uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
120 0xff, 0xff, 0xff, 0xff,
121 0xff, 0xff, 0xff, 0xff,
122 0xff, 0xff, 0xff, 0xff};
123 uint8_t bitoff[16] = {0};
125 if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
126 !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
127 memcmp(fs->mask.lip, biton, sizeof(biton)) ||
128 memcmp(fs->mask.fip, biton, sizeof(biton)))
131 uint32_t biton = 0xffffffff;
132 uint32_t bitoff = 0x0U;
134 if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
135 !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
136 memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
137 memcmp(fs->mask.fip, &biton, sizeof(biton)))
141 if (!fs->val.lport || fs->mask.lport != 0xffff)
143 if (!fs->val.fport || fs->mask.fport != 0xffff)
146 if (tp->protocol_shift >= 0)
147 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
148 if (tp->ethertype_shift >= 0)
149 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
150 if (tp->port_shift >= 0)
151 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
152 if (tp->macmatch_shift >= 0)
153 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
154 if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
155 ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
157 if (tp->vnic_shift >= 0) {
158 if (fs->mask.ovlan_vld)
159 ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
160 fs->mask.ovlan) << tp->vnic_shift;
161 else if (fs->mask.pfvf_vld)
162 ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
164 fs->mask.vf) << tp->vnic_shift;
166 if (tp->tos_shift >= 0)
167 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
169 if (ntuple_mask != hash_filter_mask)
172 fs->cap = 1; /* use hash region */
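/* Illustrative example of a hash-region-eligible spec (hypothetical
 * values): an IPv4/TCP rule with exact 4-tuple matches, i.e. all-ones
 * lip/fip masks, lport/fport masks of 0xffff, and exact matches on
 * whichever extra fields (proto, ethertype, iport, VLAN/VNIC, TOS) the
 * TP shifts above contribute to hash_filter_mask. Any wildcarded field
 * makes ntuple_mask != hash_filter_mask, so the rule is left for the
 * TCAM region instead.
 */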
176 ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
177 struct ch_filter_specification *fs,
178 struct rte_flow_error *e)
180 const struct rte_flow_item_eth *spec = item->spec;
181 const struct rte_flow_item_eth *umask = item->mask;
182 const struct rte_flow_item_eth *mask;
184 /* If user has not given any mask, then use chelsio supported mask. */
185 mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
190 /* We don't support SRC_MAC filtering. */
191 if (!rte_is_zero_ether_addr(&mask->src))
192 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
194 "src mac filtering not supported");
196 if (!rte_is_zero_ether_addr(&mask->dst)) {
197 CXGBE_FILL_FS(0, 0x1ff, macidx);
198 CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
202 CXGBE_FILL_FS(be16_to_cpu(spec->type),
203 be16_to_cpu(mask->type), ethtype);
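/* Illustrative ETH item accepted above (hypothetical addresses):
 *
 *	struct rte_flow_item_eth spec = {
 *		.dst.addr_bytes = { 0x00, 0x07, 0x43, 0xaa, 0xbb, 0xcc },
 *		.type = RTE_BE16(0x0800),
 *	};
 *	struct rte_flow_item_eth mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *		.type = RTE_BE16(0xffff),
 *	};
 *
 * A non-zero source-MAC mask would be rejected with ENOTSUP.
 */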
209 ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
210 struct ch_filter_specification *fs,
211 struct rte_flow_error *e)
213 const struct rte_flow_item_phy_port *val = item->spec;
214 const struct rte_flow_item_phy_port *umask = item->mask;
215 const struct rte_flow_item_phy_port *mask;
217 mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
220 return 0; /* Wildcard, match all physical ports */
222 if (val->index > 0x7)
223 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
225 "port index up to 0x7 is supported");
227 CXGBE_FILL_FS(val->index, mask->index, iport);
233 ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
234 struct ch_filter_specification *fs,
235 struct rte_flow_error *e)
237 const struct rte_flow_item_vlan *spec = item->spec;
238 const struct rte_flow_item_vlan *umask = item->mask;
239 const struct rte_flow_item_vlan *mask;
241 /* If user has not given any mask, then use chelsio supported mask. */
242 mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
244 if (!fs->mask.ethtype)
245 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
247 "Can't parse VLAN item without knowing ethertype");
249 /* If ethertype is already set and is not VLAN (0x8100) or
250 * QINQ(0x88A8), then don't proceed further. Otherwise,
251 * reset the outer ethertype, so that it can be replaced by
252 * innermost ethertype. Note that hardware will automatically
253 * match against VLAN or QINQ packets, based on 'ivlan_vld' or
254 * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
256 if (fs->mask.ethtype) {
257 if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
258 fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
259 return rte_flow_error_set(e, EINVAL,
260 RTE_FLOW_ERROR_TYPE_ITEM,
262 "Ethertype must be 0x8100 or 0x88a8");
265 if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
266 CXGBE_FILL_FS(1, 1, ovlan_vld);
268 CXGBE_FILL_FS(be16_to_cpu(spec->tci),
269 be16_to_cpu(mask->tci), ovlan);
271 fs->mask.ethtype = 0;
274 } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
275 CXGBE_FILL_FS(1, 1, ivlan_vld);
277 CXGBE_FILL_FS(be16_to_cpu(spec->tci),
278 be16_to_cpu(mask->tci), ivlan);
280 fs->mask.ethtype = 0;
286 CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
287 be16_to_cpu(mask->inner_type), ethtype);
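/* Illustrative pattern handled above (hypothetical values):
 * ETH (type 0x8100) / VLAN (tci 100, inner_type 0x0800). The ETH item
 * first records ethertype 0x8100; this parser converts that into an
 * ivlan_vld/ivlan match, clears the outer ethertype, and replaces it
 * with the inner type 0x0800, which cxgbe_tweak_filter_spec() may then
 * fold into FILTER_TYPE_IPV4. A leading 0x88a8 ethertype is handled
 * the same way via ovlan_vld/ovlan for QinQ.
 */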
293 ch_rte_parsetype_pf(const void *dmask __rte_unused,
294 const struct rte_flow_item *item __rte_unused,
295 struct ch_filter_specification *fs,
296 struct rte_flow_error *e __rte_unused)
298 struct rte_flow *flow = (struct rte_flow *)fs->private;
299 struct rte_eth_dev *dev = flow->dev;
300 struct adapter *adap = ethdev2adap(dev);
302 CXGBE_FILL_FS(1, 1, pfvf_vld);
304 CXGBE_FILL_FS(adap->pf, 0x7, pf);
309 ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
310 struct ch_filter_specification *fs,
311 struct rte_flow_error *e)
313 const struct rte_flow_item_vf *umask = item->mask;
314 const struct rte_flow_item_vf *val = item->spec;
315 const struct rte_flow_item_vf *mask;
317 /* If user has not given any mask, then use chelsio supported mask. */
318 mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
320 CXGBE_FILL_FS(1, 1, pfvf_vld);
323 return 0; /* Wildcard, match all VFs */
325 if (val->id > UCHAR_MAX)
326 return rte_flow_error_set(e, EINVAL,
327 RTE_FLOW_ERROR_TYPE_ITEM,
331 CXGBE_FILL_FS(val->id, mask->id, vf);
337 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
338 struct ch_filter_specification *fs,
339 struct rte_flow_error *e)
341 const struct rte_flow_item_udp *val = item->spec;
342 const struct rte_flow_item_udp *umask = item->mask;
343 const struct rte_flow_item_udp *mask;
345 mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
347 if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
348 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
350 "udp: only src/dst port supported");
352 CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
355 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
356 be16_to_cpu(mask->hdr.src_port), fport);
357 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
358 be16_to_cpu(mask->hdr.dst_port), lport);
363 ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
364 struct ch_filter_specification *fs,
365 struct rte_flow_error *e)
367 const struct rte_flow_item_tcp *val = item->spec;
368 const struct rte_flow_item_tcp *umask = item->mask;
369 const struct rte_flow_item_tcp *mask;
371 mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
373 if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
374 mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
376 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
378 "tcp: only src/dst port supported");
380 CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
383 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
384 be16_to_cpu(mask->hdr.src_port), fport);
385 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
386 be16_to_cpu(mask->hdr.dst_port), lport);
391 ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
392 struct ch_filter_specification *fs,
393 struct rte_flow_error *e)
395 const struct rte_flow_item_ipv4 *val = item->spec;
396 const struct rte_flow_item_ipv4 *umask = item->mask;
397 const struct rte_flow_item_ipv4 *mask;
399 mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
401 if (mask->hdr.time_to_live)
402 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
403 item, "ttl is not supported");
405 if (fs->mask.ethtype &&
406 (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
407 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
409 "Couldn't find IPv4 ethertype");
410 fs->type = FILTER_TYPE_IPV4;
412 return 0; /* ipv4 wild card */
414 CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
415 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
416 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
417 CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);
423 ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
424 struct ch_filter_specification *fs,
425 struct rte_flow_error *e)
427 const struct rte_flow_item_ipv6 *val = item->spec;
428 const struct rte_flow_item_ipv6 *umask = item->mask;
429 const struct rte_flow_item_ipv6 *mask;
430 u32 vtc_flow, vtc_flow_mask;
432 mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
434 vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
436 if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
437 mask->hdr.payload_len || mask->hdr.hop_limits)
438 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
440 "flow/hop are not supported");
442 if (fs->mask.ethtype &&
443 (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
444 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
446 "Couldn't find IPv6 ethertype");
447 fs->type = FILTER_TYPE_IPV6;
449 return 0; /* ipv6 wild card */
451 CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
453 vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
454 CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
455 RTE_IPV6_HDR_TC_SHIFT,
456 (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
457 RTE_IPV6_HDR_TC_SHIFT,
460 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
461 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
467 cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
468 struct rte_flow_error *e)
471 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
472 attr, "attribute:<egress> is"
475 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
476 attr, "group parameter is"
479 flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
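/* E.g. attr->priority == 1 requests TCAM slot 0, while
 * attr->priority == 0 leaves fidx at FILTER_ID_MAX, which
 * cxgbe_get_fidx() below treats as "allocate the next free slot".
 */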
484 static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
486 struct port_info *pi = ethdev2pinfo(dev);
488 if (rxq > pi->n_rx_qsets)
493 static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
495 struct adapter *adap = ethdev2adap(f->dev);
496 struct ch_filter_specification fs = f->fs;
499 if (fidx >= adap->tids.nftids) {
500 dev_err(adap, "invalid flow index %d.\n", fidx);
504 nentries = cxgbe_filter_slots(adap, fs.type);
505 if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
506 dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
514 cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
515 struct adapter *adap, unsigned int fidx)
519 nentries = cxgbe_filter_slots(adap, fs->type);
520 if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
521 dev_err(adap, "filter index: %d is busy.\n", fidx);
525 if (fidx >= adap->tids.nftids) {
526 dev_err(adap, "filter index (%u) >= max(%u)\n",
527 fidx, adap->tids.nftids);
535 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
538 return 0; /* Hash filters */
539 return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
540 cxgbe_validate_fidxonadd(&flow->fs,
541 ethdev2adap(flow->dev), fidx);
544 static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
546 struct ch_filter_specification *fs = &flow->fs;
547 struct adapter *adap = ethdev2adap(flow->dev);
549 /* For TCAM, get the next available slot if the default value was specified */
550 if (flow->fidx == FILTER_ID_MAX) {
554 nentries = cxgbe_filter_slots(adap, fs->type);
555 idx = cxgbe_alloc_ftid(adap, nentries);
557 dev_err(adap, "unable to get a filter index in tcam\n");
560 *fidx = (unsigned int)idx;
569 cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
571 const struct rte_flow_item *i;
572 int j, index = -ENOENT;
574 for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
575 if (i->type == type) {
585 ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
588 * BIT_0 = [src_ip], BIT_1 = [dst_ip]
589 * BIT_2 = [src_port], BIT_3 = [dst_port]
591 * Only the cases below are supported, as per our spec.
595 fs->nat_mode = NAT_MODE_NONE;
598 fs->nat_mode = NAT_MODE_DIP;
601 fs->nat_mode = NAT_MODE_SIP_SP;
604 fs->nat_mode = NAT_MODE_DIP_SIP_SP;
607 fs->nat_mode = NAT_MODE_DIP_DP;
610 fs->nat_mode = NAT_MODE_DIP_DP_SIP;
613 fs->nat_mode = NAT_MODE_DIP_DP_SP;
616 fs->nat_mode = NAT_MODE_ALL;
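/* Illustrative mapping, per the bit legend above: a rule whose switch
 * actions rewrite only the destination IP (BIT_1) and destination port
 * (BIT_3) yields NAT_MODE_DIP_DP; rewriting all four fields yields
 * NAT_MODE_ALL; combinations outside the list above are rejected.
 */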
626 ch_rte_parse_atype_switch(const struct rte_flow_action *a,
627 const struct rte_flow_item items[],
629 struct ch_filter_specification *fs,
630 struct rte_flow_error *e)
632 const struct rte_flow_action_of_set_vlan_vid *vlanid;
633 const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
634 const struct rte_flow_action_of_push_vlan *pushvlan;
635 const struct rte_flow_action_set_ipv4 *ipv4;
636 const struct rte_flow_action_set_ipv6 *ipv6;
637 const struct rte_flow_action_set_tp *tp_port;
638 const struct rte_flow_action_phy_port *port;
639 const struct rte_flow_action_set_mac *mac;
644 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
645 vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
647 /* If explicitly asked to push a new VLAN header,
648 * then don't set rewrite mode. Otherwise, the
649 * incoming VLAN packets will get their VLAN fields
650 * rewritten, instead of adding an additional outer
653 if (fs->newvlan != VLAN_INSERT)
654 fs->newvlan = VLAN_REWRITE;
655 tmp_vlan = fs->vlan & 0xe000;
656 fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
658 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
659 vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
661 /* If explicitly asked to push a new VLAN header,
662 * then don't set rewrite mode. Otherwise, the
663 * incoming VLAN packets will get their VLAN fields
664 * rewritten, instead of adding an additional outer
667 if (fs->newvlan != VLAN_INSERT)
668 fs->newvlan = VLAN_REWRITE;
669 tmp_vlan = fs->vlan & 0xfff;
670 fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
672 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
673 pushvlan = (const struct rte_flow_action_of_push_vlan *)
675 if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
676 return rte_flow_error_set(e, EINVAL,
677 RTE_FLOW_ERROR_TYPE_ACTION, a,
678 "only ethertype 0x8100 "
679 "supported for push vlan.");
680 fs->newvlan = VLAN_INSERT;
682 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
683 fs->newvlan = VLAN_REMOVE;
685 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
686 port = (const struct rte_flow_action_phy_port *)a->conf;
687 fs->eport = port->index;
689 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
690 item_index = cxgbe_get_flow_item_index(items,
691 RTE_FLOW_ITEM_TYPE_IPV4);
693 return rte_flow_error_set(e, EINVAL,
694 RTE_FLOW_ERROR_TYPE_ACTION, a,
695 "No RTE_FLOW_ITEM_TYPE_IPV4 "
698 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
699 memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
702 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
703 item_index = cxgbe_get_flow_item_index(items,
704 RTE_FLOW_ITEM_TYPE_IPV4);
706 return rte_flow_error_set(e, EINVAL,
707 RTE_FLOW_ERROR_TYPE_ACTION, a,
708 "No RTE_FLOW_ITEM_TYPE_IPV4 "
711 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
712 memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
715 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
716 item_index = cxgbe_get_flow_item_index(items,
717 RTE_FLOW_ITEM_TYPE_IPV6);
719 return rte_flow_error_set(e, EINVAL,
720 RTE_FLOW_ERROR_TYPE_ACTION, a,
721 "No RTE_FLOW_ITEM_TYPE_IPV6 "
724 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
725 memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
728 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
729 item_index = cxgbe_get_flow_item_index(items,
730 RTE_FLOW_ITEM_TYPE_IPV6);
732 return rte_flow_error_set(e, EINVAL,
733 RTE_FLOW_ERROR_TYPE_ACTION, a,
734 "No RTE_FLOW_ITEM_TYPE_IPV6 "
737 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
738 memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
741 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
742 item_index = cxgbe_get_flow_item_index(items,
743 RTE_FLOW_ITEM_TYPE_TCP);
744 if (item_index < 0) {
746 cxgbe_get_flow_item_index(items,
747 RTE_FLOW_ITEM_TYPE_UDP);
749 return rte_flow_error_set(e, EINVAL,
750 RTE_FLOW_ERROR_TYPE_ACTION, a,
751 "No RTE_FLOW_ITEM_TYPE_TCP or "
752 "RTE_FLOW_ITEM_TYPE_UDP found");
755 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
756 fs->nat_fport = be16_to_cpu(tp_port->port);
759 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
760 item_index = cxgbe_get_flow_item_index(items,
761 RTE_FLOW_ITEM_TYPE_TCP);
762 if (item_index < 0) {
764 cxgbe_get_flow_item_index(items,
765 RTE_FLOW_ITEM_TYPE_UDP);
767 return rte_flow_error_set(e, EINVAL,
768 RTE_FLOW_ERROR_TYPE_ACTION, a,
769 "No RTE_FLOW_ITEM_TYPE_TCP or "
770 "RTE_FLOW_ITEM_TYPE_UDP found");
773 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
774 fs->nat_lport = be16_to_cpu(tp_port->port);
777 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
778 item_index = cxgbe_get_flow_item_index(items,
779 RTE_FLOW_ITEM_TYPE_ETH);
781 return rte_flow_error_set(e, EINVAL,
782 RTE_FLOW_ERROR_TYPE_ACTION, a,
783 "No RTE_FLOW_ITEM_TYPE_ETH "
787 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
788 item_index = cxgbe_get_flow_item_index(items,
789 RTE_FLOW_ITEM_TYPE_ETH);
791 return rte_flow_error_set(e, EINVAL,
792 RTE_FLOW_ERROR_TYPE_ACTION, a,
793 "No RTE_FLOW_ITEM_TYPE_ETH "
795 mac = (const struct rte_flow_action_set_mac *)a->conf;
798 memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
800 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
801 item_index = cxgbe_get_flow_item_index(items,
802 RTE_FLOW_ITEM_TYPE_ETH);
804 return rte_flow_error_set(e, EINVAL,
805 RTE_FLOW_ERROR_TYPE_ACTION, a,
806 "No RTE_FLOW_ITEM_TYPE_ETH found");
807 mac = (const struct rte_flow_action_set_mac *)a->conf;
810 memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
813 /* We are not supposed to come here */
814 return rte_flow_error_set(e, EINVAL,
815 RTE_FLOW_ERROR_TYPE_ACTION, a,
816 "Action not supported");
823 cxgbe_rtef_parse_actions(struct rte_flow *flow,
824 const struct rte_flow_item items[],
825 const struct rte_flow_action action[],
826 struct rte_flow_error *e)
828 struct ch_filter_specification *fs = &flow->fs;
829 uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
830 uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
831 const struct rte_flow_action_queue *q;
832 const struct rte_flow_action *a;
836 for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
838 case RTE_FLOW_ACTION_TYPE_VOID:
840 case RTE_FLOW_ACTION_TYPE_DROP:
842 return rte_flow_error_set(e, EINVAL,
843 RTE_FLOW_ERROR_TYPE_ACTION, a,
844 "specify only 1 pass/drop");
845 fs->action = FILTER_DROP;
847 case RTE_FLOW_ACTION_TYPE_QUEUE:
848 q = (const struct rte_flow_action_queue *)a->conf;
850 return rte_flow_error_set(e, EINVAL,
851 RTE_FLOW_ERROR_TYPE_ACTION, q,
852 "specify rx queue index");
853 if (check_rxq(flow->dev, q->index))
854 return rte_flow_error_set(e, EINVAL,
855 RTE_FLOW_ERROR_TYPE_ACTION, q,
858 return rte_flow_error_set(e, EINVAL,
859 RTE_FLOW_ERROR_TYPE_ACTION, a,
860 "specify only 1 pass/drop");
861 fs->action = FILTER_PASS;
865 case RTE_FLOW_ACTION_TYPE_COUNT:
868 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
871 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
874 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
875 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
876 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
877 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
878 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
879 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
882 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
883 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
886 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
887 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
888 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
889 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
891 /* We allow multiple switch actions, but switch is
892 * not compatible with either queue or drop
894 if (abit++ && fs->action != FILTER_SWITCH)
895 return rte_flow_error_set(e, EINVAL,
896 RTE_FLOW_ERROR_TYPE_ACTION, a,
897 "overlapping action specified");
898 if (nat_ipv4 && nat_ipv6)
899 return rte_flow_error_set(e, EINVAL,
900 RTE_FLOW_ERROR_TYPE_ACTION, a,
901 "Can't have one address ipv4 and the"
904 ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
908 fs->action = FILTER_SWITCH;
911 /* Not supported action : return error */
912 return rte_flow_error_set(e, ENOTSUP,
913 RTE_FLOW_ERROR_TYPE_ACTION,
914 a, "Action not supported");
918 if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
919 return rte_flow_error_set(e, EINVAL,
920 RTE_FLOW_ERROR_TYPE_ACTION, a,
921 "Both OF_SET_VLAN_VID and "
922 "OF_SET_VLAN_PCP must be specified");
924 if (ch_rte_parse_nat(nmode, fs))
925 return rte_flow_error_set(e, EINVAL,
926 RTE_FLOW_ERROR_TYPE_ACTION, a,
927 "invalid settings for swich action");
931 static struct chrte_fparse parseitem[] = {
932 [RTE_FLOW_ITEM_TYPE_ETH] = {
933 .fptr = ch_rte_parsetype_eth,
934 .dmask = &(const struct rte_flow_item_eth){
935 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
936 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
941 [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
942 .fptr = ch_rte_parsetype_port,
943 .dmask = &(const struct rte_flow_item_phy_port){
948 [RTE_FLOW_ITEM_TYPE_VLAN] = {
949 .fptr = ch_rte_parsetype_vlan,
950 .dmask = &(const struct rte_flow_item_vlan){
952 .inner_type = 0xffff,
956 [RTE_FLOW_ITEM_TYPE_IPV4] = {
957 .fptr = ch_rte_parsetype_ipv4,
958 .dmask = &(const struct rte_flow_item_ipv4) {
960 .src_addr = RTE_BE32(0xffffffff),
961 .dst_addr = RTE_BE32(0xffffffff),
962 .type_of_service = 0xff,
967 [RTE_FLOW_ITEM_TYPE_IPV6] = {
968 .fptr = ch_rte_parsetype_ipv6,
969 .dmask = &(const struct rte_flow_item_ipv6) {
972 "\xff\xff\xff\xff\xff\xff\xff\xff"
973 "\xff\xff\xff\xff\xff\xff\xff\xff",
975 "\xff\xff\xff\xff\xff\xff\xff\xff"
976 "\xff\xff\xff\xff\xff\xff\xff\xff",
977 .vtc_flow = RTE_BE32(0xff000000),
982 [RTE_FLOW_ITEM_TYPE_UDP] = {
983 .fptr = ch_rte_parsetype_udp,
984 .dmask = &rte_flow_item_udp_mask,
987 [RTE_FLOW_ITEM_TYPE_TCP] = {
988 .fptr = ch_rte_parsetype_tcp,
989 .dmask = &rte_flow_item_tcp_mask,
992 [RTE_FLOW_ITEM_TYPE_PF] = {
993 .fptr = ch_rte_parsetype_pf,
997 [RTE_FLOW_ITEM_TYPE_VF] = {
998 .fptr = ch_rte_parsetype_vf,
999 .dmask = &(const struct rte_flow_item_vf){
1006 cxgbe_rtef_parse_items(struct rte_flow *flow,
1007 const struct rte_flow_item items[],
1008 struct rte_flow_error *e)
1010 struct adapter *adap = ethdev2adap(flow->dev);
1011 const struct rte_flow_item *i;
1012 char repeat[ARRAY_SIZE(parseitem)] = {0};
1014 for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
1015 struct chrte_fparse *idx;
1018 if (i->type >= ARRAY_SIZE(parseitem))
1019 return rte_flow_error_set(e, ENOTSUP,
1020 RTE_FLOW_ERROR_TYPE_ITEM,
1021 i, "Item not supported");
1024 case RTE_FLOW_ITEM_TYPE_VOID:
1027 /* check if item is repeated */
1028 if (repeat[i->type] &&
1029 i->type != RTE_FLOW_ITEM_TYPE_VLAN)
1030 return rte_flow_error_set(e, ENOTSUP,
1031 RTE_FLOW_ERROR_TYPE_ITEM, i,
1032 "parse items cannot be repeated(except void/vlan)");
1034 repeat[i->type] = 1;
1036 /* validate the item */
1037 ret = cxgbe_validate_item(i, e);
1041 idx = &flow->item_parser[i->type];
1042 if (!idx || !idx->fptr) {
1043 return rte_flow_error_set(e, ENOTSUP,
1044 RTE_FLOW_ERROR_TYPE_ITEM, i,
1045 "Item not supported");
1047 ret = idx->fptr(idx->dmask, i, &flow->fs, e);
1054 cxgbe_fill_filter_region(adap, &flow->fs);
1055 cxgbe_tweak_filter_spec(adap, &flow->fs);
1061 cxgbe_flow_parse(struct rte_flow *flow,
1062 const struct rte_flow_attr *attr,
1063 const struct rte_flow_item item[],
1064 const struct rte_flow_action action[],
1065 struct rte_flow_error *e)
1068 /* parse user request into ch_filter_specification */
1069 ret = cxgbe_rtef_parse_attr(flow, attr, e);
1072 ret = cxgbe_rtef_parse_items(flow, item, e);
1075 return cxgbe_rtef_parse_actions(flow, item, action, e);
1078 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
1080 struct ch_filter_specification *fs = &flow->fs;
1081 struct adapter *adap = ethdev2adap(dev);
1082 struct tid_info *t = &adap->tids;
1083 struct filter_ctx ctx;
1087 if (cxgbe_get_fidx(flow, &fidx))
1089 if (cxgbe_verify_fidx(flow, fidx, 0))
1092 t4_init_completion(&ctx.completion);
1093 /* go create the filter */
1094 err = cxgbe_set_filter(dev, fidx, fs, &ctx);
1096 dev_err(adap, "Error %d while creating filter.\n", err);
1100 /* Poll the FW for reply */
1101 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1103 CXGBE_FLOW_POLL_CNT,
1106 dev_err(adap, "Filter set operation timed out (%d)\n", err);
1110 dev_err(adap, "Hardware error %d while creating the filter.\n",
1115 if (fs->cap) { /* to destroy the filter */
1116 flow->fidx = ctx.tid;
1117 flow->f = lookup_tid(t, ctx.tid);
1120 flow->f = &adap->tids.ftid_tab[fidx];
1126 static struct rte_flow *
1127 cxgbe_flow_create(struct rte_eth_dev *dev,
1128 const struct rte_flow_attr *attr,
1129 const struct rte_flow_item item[],
1130 const struct rte_flow_action action[],
1131 struct rte_flow_error *e)
1133 struct adapter *adap = ethdev2adap(dev);
1134 struct rte_flow *flow;
1137 flow = t4_os_alloc(sizeof(struct rte_flow));
1139 rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1140 NULL, "Unable to allocate memory for"
1145 flow->item_parser = parseitem;
1147 flow->fs.private = (void *)flow;
1149 if (cxgbe_flow_parse(flow, attr, item, action, e)) {
1154 t4_os_lock(&adap->flow_lock);
1155 /* go, interact with cxgbe_filter */
1156 ret = __cxgbe_flow_create(dev, flow);
1157 t4_os_unlock(&adap->flow_lock);
1159 rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1160 NULL, "Unable to create flow rule");
1165 flow->f->private = flow; /* Will be used during flush */
1170 static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1172 struct adapter *adap = ethdev2adap(dev);
1173 struct filter_entry *f = flow->f;
1174 struct ch_filter_specification *fs;
1175 struct filter_ctx ctx;
1179 if (cxgbe_verify_fidx(flow, flow->fidx, 1))
1182 t4_init_completion(&ctx.completion);
1183 err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
1185 dev_err(adap, "Error %d while deleting filter.\n", err);
1189 /* Poll the FW for reply */
1190 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1192 CXGBE_FLOW_POLL_CNT,
1195 dev_err(adap, "Filter delete operation timed out (%d)\n", err);
1199 dev_err(adap, "Hardware error %d while deleting the filter.\n",
1208 cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1209 struct rte_flow_error *e)
1211 struct adapter *adap = ethdev2adap(dev);
1214 t4_os_lock(&adap->flow_lock);
1215 ret = __cxgbe_flow_destroy(dev, flow);
1216 t4_os_unlock(&adap->flow_lock);
1218 return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1219 flow, "error destroying filter.");
1224 static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
1227 struct adapter *adap = ethdev2adap(flow->dev);
1228 struct ch_filter_specification fs = flow->f->fs;
1229 unsigned int fidx = flow->fidx;
1232 ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
1235 return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
1239 cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1240 const struct rte_flow_action *action, void *data,
1241 struct rte_flow_error *e)
1243 struct adapter *adap = ethdev2adap(flow->dev);
1244 struct ch_filter_specification fs;
1245 struct rte_flow_query_count *c;
1246 struct filter_entry *f;
1254 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1255 return rte_flow_error_set(e, ENOTSUP,
1256 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1257 "only count supported for query");
1260 * This is a valid operation, since we are allowed to do Chelsio
1261 * specific operations on the rte side of our code, but not vice versa.
1263 * So, fs can be queried/modified here, BUT rte_flow_query_count
1264 * cannot be worked on by the lower layer, since we want to maintain
1265 * it as rte_flow agnostic.
1268 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1269 &fs, "filter hit counters were not"
1270 " enabled during filter creation");
1272 c = (struct rte_flow_query_count *)data;
1274 t4_os_lock(&adap->flow_lock);
1275 ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
1277 rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
1278 f, "cxgbe pmd failed to perform query");
1282 /* Query was successful */
1286 cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
1289 t4_os_unlock(&adap->flow_lock);
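/* Caller-side sketch (hypothetical handles): a rule created with a
 * COUNT action and hit counters enabled can be read back through the
 * generic API, which lands in cxgbe_flow_query() above:
 *
 *	struct rte_flow_query_count cnt = { .reset = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	rte_flow_query(port_id, flow, &count, &cnt, &err);
 *
 * On success cnt.hits/cnt.bytes hold the filter statistics, and with
 * .reset set the hardware counters are cleared after the read
 * (cxgbe_clear_filter_count() above).
 */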
1294 cxgbe_flow_validate(struct rte_eth_dev *dev,
1295 const struct rte_flow_attr *attr,
1296 const struct rte_flow_item item[],
1297 const struct rte_flow_action action[],
1298 struct rte_flow_error *e)
1300 struct adapter *adap = ethdev2adap(dev);
1301 struct rte_flow *flow;
1305 flow = t4_os_alloc(sizeof(struct rte_flow));
1307 return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1309 "Unable to allocate memory for filter_entry");
1311 flow->item_parser = parseitem;
1314 ret = cxgbe_flow_parse(flow, attr, item, action, e);
1320 if (cxgbe_validate_filter(adap, &flow->fs)) {
1322 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1324 "validation failed. Check f/w config file.");
1327 t4_os_lock(&adap->flow_lock);
1328 if (cxgbe_get_fidx(flow, &fidx)) {
1329 ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1330 NULL, "no memory in tcam.");
1334 if (cxgbe_verify_fidx(flow, fidx, 0)) {
1335 ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1336 NULL, "validation failed");
1341 t4_os_unlock(&adap->flow_lock);
1347 * @ret : == 0 filter destroyed successfully
1348 * < 0 error destroying filter
1349 * == 1 filter not active / not found
1352 cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
1354 if (f && (f->valid || f->pending) &&
1355 f->dev == dev && /* Only if user has asked for this port */
1356 f->private) /* We (rte_flow) created this filter */
1357 return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
1361 static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
1363 struct adapter *adap = ethdev2adap(dev);
1367 t4_os_lock(&adap->flow_lock);
1368 if (adap->tids.ftid_tab) {
1369 struct filter_entry *f = &adap->tids.ftid_tab[0];
1371 for (i = 0; i < adap->tids.nftids; i++, f++) {
1372 ret = cxgbe_check_n_destroy(f, dev);
1374 rte_flow_error_set(e, ret,
1375 RTE_FLOW_ERROR_TYPE_HANDLE,
1377 "error destroying TCAM "
1384 if (is_hashfilter(adap) && adap->tids.tid_tab) {
1385 struct filter_entry *f;
1387 for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
1388 f = (struct filter_entry *)adap->tids.tid_tab[i];
1390 ret = cxgbe_check_n_destroy(f, dev);
1392 rte_flow_error_set(e, ret,
1393 RTE_FLOW_ERROR_TYPE_HANDLE,
1395 "error destroying HASH "
1403 t4_os_unlock(&adap->flow_lock);
1404 return ret >= 0 ? 0 : ret;
1407 static const struct rte_flow_ops cxgbe_flow_ops = {
1408 .validate = cxgbe_flow_validate,
1409 .create = cxgbe_flow_create,
1410 .destroy = cxgbe_flow_destroy,
1411 .flush = cxgbe_flow_flush,
1412 .query = cxgbe_flow_query,
1417 cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
1418 enum rte_filter_type filter_type,
1419 enum rte_filter_op filter_op,
1425 switch (filter_type) {
1426 case RTE_ETH_FILTER_GENERIC:
1427 if (filter_op != RTE_ETH_FILTER_GET)
1429 *(const void **)arg = &cxgbe_flow_ops;