/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "base/common.h"
#include "cxgbe_flow.h"
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "Redefined match item with" \
					  " different values found"); \
	(fs)->val.elem = (__v); \
	(fs)->mask.elem = (__m); \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
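/* Illustrative expansion (not part of the driver logic): inside a
 * parser where 'fs' points to the ch_filter_specification and 'e' is
 * the rte_flow_error,
 *
 *	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
 *
 * first fails with EINVAL if fs->val.proto was already set to a
 * different value by an earlier pattern item, then stores
 * val = IPPROTO_UDP and mask = 0xff. The MEMCPY variant does the same
 * for byte-array fields (e.g. IP addresses), without the
 * redefinition check.
 */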
static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* rte_flow specification does not allow it. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last or mask given without spec");

	/* We don't support it.
	 * Although we could support values in last as 0's or last == spec,
	 * that would not provide the user with any additional functionality
	 * and would only increase the complexity for us.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last is not supported by chelsio pmd");

	return 0;
}
/**
 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
 * there are only 40 bits available to store match fields.
 * So, to save space, optimize filter spec for some common
 * known fields that hardware can parse against incoming
 * packets automatically.
 */
static void
cxgbe_tweak_filter_spec(struct adapter *adap,
			struct ch_filter_specification *fs)
{
	/* Save the 16-bit ethertype field space by setting corresponding
	 * 1-bit flags in the filter spec for common known ethertypes.
	 * When hardware sees these flags, it automatically infers and
	 * matches incoming packets against the corresponding ethertype.
	 */
	if (fs->mask.ethtype == 0xffff) {
		switch (fs->val.ethtype) {
		case RTE_ETHER_TYPE_IPV4:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV4;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_IPV6:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV6;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_VLAN:
			if (adap->params.tp.ethertype_shift < 0 &&
			    adap->params.tp.vlan_shift >= 0) {
				fs->val.ivlan_vld = 1;
				fs->mask.ivlan_vld = 1;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_QINQ:
			if (adap->params.tp.ethertype_shift < 0 &&
			    adap->params.tp.vnic_shift >= 0) {
				fs->val.ovlan_vld = 1;
				fs->mask.ovlan_vld = 1;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		default:
			break;
		}
	}
}
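/* Sketch of the effect, assuming the TP ethertype field is not
 * compressed into the hardware tuple (ethertype_shift < 0): a flow
 * matching ethertype 0x0800 with mask 0xffff is rewritten to
 * fs->type = FILTER_TYPE_IPV4 with the 16-bit ethertype match cleared,
 * freeing that space in the 40-bit tuple for other fields.
 */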
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
			       tp->vlan_shift;
	if (tp->vnic_shift >= 0 && fs->mask.ovlan_vld)
		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ovlan) <<
			       tp->vnic_shift;
	if (tp->tos_shift >= 0)
		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1; /* use hash region */
}
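/* Worked example (illustrative): for an exactly-masked 4-tuple flow
 * that also matches the IP protocol, the chain above contributes
 *
 *	ntuple_mask |= (u64)0xff << tp->protocol_shift;
 *
 * plus the contribution of any other compressed TP fields. Only when
 * the accumulated ntuple_mask equals tp->hash_filter_mask (the mask
 * the firmware configured for the hash region) is fs->cap set and the
 * rule placed in the high-capacity hash region instead of the LE-TCAM.
 */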
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *umask = item->mask;
	const struct rte_flow_item_eth *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

	if (!spec)
		return 0;

	/* we don't support SRC_MAC filtering */
	if (!rte_is_zero_ether_addr(&mask->src))
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "src mac filtering not supported");

	if (!rte_is_zero_ether_addr(&mask->dst)) {
		const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
		const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
		struct rte_flow *flow = (struct rte_flow *)fs->private;
		struct port_info *pi = (struct port_info *)
				       (flow->dev->data->dev_private);
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, addr, m);
		if (idx <= 0)
			return rte_flow_error_set(e, idx,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unable to allocate mac"
						  " entry in h/w");
		CXGBE_FILL_FS(idx, 0x1ff, macidx);
	}

	CXGBE_FILL_FS(be16_to_cpu(spec->type),
		      be16_to_cpu(mask->type), ethtype);

	return 0;
}
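/* Note (illustrative): per the parseitem table below, when item->mask
 * is NULL the default eth mask matches the destination MAC and the
 * ethertype exactly while ignoring the source MAC, which is why only
 * source-MAC matching is rejected above.
 */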
static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_phy_port *val = item->spec;
	const struct rte_flow_item_phy_port *umask = item->mask;
	const struct rte_flow_item_phy_port *mask;

	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

	if (val->index > 0x7)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "port index up to 0x7 is supported");

	CXGBE_FILL_FS(val->index, mask->index, iport);

	return 0;
}
static int
ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *umask = item->mask;
	const struct rte_flow_item_vlan *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;

	if (!fs->mask.ethtype)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Can't parse VLAN item without knowing ethertype");

	/* If ethertype is already set and is not VLAN (0x8100) or
	 * QINQ (0x88A8), then don't proceed further. Otherwise,
	 * reset the outer ethertype, so that it can be replaced by
	 * the innermost ethertype. Note that hardware will automatically
	 * match against VLAN or QINQ packets, based on the 'ivlan_vld' or
	 * 'ovlan_vld' bit set in the Chelsio filter spec, respectively.
	 */
	if (fs->mask.ethtype) {
		if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
		    fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Ethertype must be 0x8100 or 0x88a8");

		if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
			CXGBE_FILL_FS(1, 1, ovlan_vld);
			if (spec) {
				CXGBE_FILL_FS(be16_to_cpu(spec->tci),
					      be16_to_cpu(mask->tci), ovlan);

				fs->mask.ethtype = 0;
				fs->val.ethtype = 0;
			}
		} else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
			CXGBE_FILL_FS(1, 1, ivlan_vld);
			if (spec) {
				CXGBE_FILL_FS(be16_to_cpu(spec->tci),
					      be16_to_cpu(mask->tci), ivlan);

				fs->mask.ethtype = 0;
				fs->val.ethtype = 0;
			}
		}
	}

	if (spec)
		CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
			      be16_to_cpu(mask->inner_type), ethtype);

	return 0;
}
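/* Illustrative mapping, assuming an "eth type is 0x8100 / vlan" style
 * pattern: the logic above sets ivlan_vld = 1, matches the 16-bit TCI
 * via 'ivlan', and lets the VLAN item's inner_type replace the outer
 * ethertype match. An outer QinQ tag (ethertype 0x88a8) maps to
 * 'ovlan'/'ovlan_vld' in the same way.
 */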
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;

	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}
static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;

	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}
static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl is not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv4 ethertype");

	fs->type = FILTER_TYPE_IPV4;
	if (!val)
		return 0; /* ipv4 wild card */

	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
	CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);

	return 0;
}
static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;
	u32 vtc_flow, vtc_flow_mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);

	if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "flow/hop are not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv6 ethertype");

	fs->type = FILTER_TYPE_IPV6;
	if (!val)
		return 0; /* ipv6 wild card */

	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);

	vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
	CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
		      RTE_IPV6_HDR_TC_SHIFT,
		      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
		      RTE_IPV6_HDR_TC_SHIFT,
		      tos);

	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}
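/* Worked example (illustrative): the IPv6 vtc_flow word packs
 * version(4) | traffic class(8) | flow label(20). For
 * vtc_flow = 0x6ab12345, the traffic class extracted above is
 * (0x6ab12345 & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT = 0xab,
 * which is matched through the same 'tos' field used for IPv4.
 */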
static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported !");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported.");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}
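/* For example (illustrative): attr->priority = 1 requests TCAM slot 0,
 * while priority = 0 leaves fidx at FILTER_ID_MAX so that
 * cxgbe_get_fidx() later in this file picks the next free slot
 * automatically.
 */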
static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	if (rxq >= pi->n_rx_qsets)
		return -EINVAL;
	return 0;
}
static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;
	u8 nentries;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %d.\n", fidx);
		return -EINVAL;
	}

	nentries = cxgbe_filter_slots(adap, fs.type);
	if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}
static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	u8 nentries;

	nentries = cxgbe_filter_slots(adap, fs->type);
	if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "filter index: %d is busy.\n", fidx);
		return -EBUSY;
	}

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}
static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */
	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		     cxgbe_validate_fidxonadd(&flow->fs,
					      ethdev2adap(flow->dev), fidx);
}
static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For tcam get the next available slot, if default value specified */
	if (flow->fidx == FILTER_ID_MAX) {
		u8 nentries;
		int idx;

		nentries = cxgbe_filter_slots(adap, fs->type);
		idx = cxgbe_alloc_ftid(adap, nentries);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}
static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
	const struct rte_flow_item *i;
	int j, index = -ENOENT;

	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
		if (i->type == type) {
			index = j;
			break;
		}
	}

	return index;
}
static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
	/* nmode:
	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
	 * BIT_2 = [src_port], BIT_3 = [dst_port]
	 *
	 * Only below cases are supported as per our spec.
	 */
	switch (nmode) {
	case 0:  /* 0000b */
		fs->nat_mode = NAT_MODE_NONE;
		break;
	case 2:  /* 0010b */
		fs->nat_mode = NAT_MODE_DIP;
		break;
	case 5:  /* 0101b */
		fs->nat_mode = NAT_MODE_SIP_SP;
		break;
	case 7:  /* 0111b */
		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
		break;
	case 10: /* 1010b */
		fs->nat_mode = NAT_MODE_DIP_DP;
		break;
	case 11: /* 1011b */
		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
		break;
	case 14: /* 1110b */
		fs->nat_mode = NAT_MODE_DIP_DP_SP;
		break;
	case 15: /* 1111b */
		fs->nat_mode = NAT_MODE_ALL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
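/* Example (illustrative): a rule with actions SET_IPV4_SRC and
 * SET_TP_SRC accumulates nmode = BIT_0 | BIT_2 = 0101b = 5 in
 * ch_rte_parse_atype_switch() below, which maps to NAT_MODE_SIP_SP
 * (rewrite source IP and source port).
 */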
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  const struct rte_flow_item items[],
			  uint8_t *nmode,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_of_set_vlan_vid *vlanid;
	const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
	const struct rte_flow_action_of_push_vlan *pushvlan;
	const struct rte_flow_action_set_ipv4 *ipv4;
	const struct rte_flow_action_set_ipv6 *ipv6;
	const struct rte_flow_action_set_tp *tp_port;
	const struct rte_flow_action_phy_port *port;
	int item_index;
	u16 tmp_vlan;

	switch (a->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
			 a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xe000;
		fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
			  a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xfff;
		fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		pushvlan = (const struct rte_flow_action_of_push_vlan *)
			   a->conf;
		if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "only ethertype 0x8100 "
						  "supported for push vlan.");
		fs->newvlan = VLAN_INSERT;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		fs->newvlan = VLAN_REMOVE;
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		port = (const struct rte_flow_action_phy_port *)a->conf;
		fs->eport = port->index;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
							  RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_fport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 2;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
							  RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_lport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 3;
		break;
	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		fs->swapmac = 1;
		break;
	default:
		/* We are not supposed to come here */
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");
	}

	return 0;
}
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
	uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	char abit = 0;
	int ret;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
768 for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
770 case RTE_FLOW_ACTION_TYPE_VOID:
772 case RTE_FLOW_ACTION_TYPE_DROP:
774 return rte_flow_error_set(e, EINVAL,
775 RTE_FLOW_ERROR_TYPE_ACTION, a,
776 "specify only 1 pass/drop");
777 fs->action = FILTER_DROP;
779 case RTE_FLOW_ACTION_TYPE_QUEUE:
780 q = (const struct rte_flow_action_queue *)a->conf;
782 return rte_flow_error_set(e, EINVAL,
783 RTE_FLOW_ERROR_TYPE_ACTION, q,
784 "specify rx queue index");
785 if (check_rxq(flow->dev, q->index))
786 return rte_flow_error_set(e, EINVAL,
787 RTE_FLOW_ERROR_TYPE_ACTION, q,
790 return rte_flow_error_set(e, EINVAL,
791 RTE_FLOW_ERROR_TYPE_ACTION, a,
792 "specify only 1 pass/drop");
793 fs->action = FILTER_PASS;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			vlan_set_vid++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			vlan_set_pcp++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			nat_ipv4++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			nat_ipv6++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
			/* We allow multiple switch actions, but switch is
			 * not compatible with either queue or drop.
			 */
			if (abit++ && fs->action != FILTER_SWITCH)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"overlapping action specified");
			if (nat_ipv4 && nat_ipv6)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"Can't have one address ipv4 and the"
						" other ipv6");

			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
							e);
			if (ret)
				return ret;
			fs->action = FILTER_SWITCH;
			break;
		default:
			/* Not supported action: return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}
	if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Both OF_SET_VLAN_VID and "
					  "OF_SET_VLAN_PCP must be specified");

	if (ch_rte_parse_nat(nmode, fs))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "invalid settings for switch action");

	return 0;
}
static struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.fptr  = ch_rte_parsetype_eth,
		.dmask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
		.fptr = ch_rte_parsetype_port,
		.dmask = &(const struct rte_flow_item_phy_port){
			.index = 0x7,
		}
	},

	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.fptr = ch_rte_parsetype_vlan,
		.dmask = &(const struct rte_flow_item_vlan){
			.tci = 0xffff,
			.inner_type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr  = ch_rte_parsetype_ipv4,
		.dmask = &(const struct rte_flow_item_ipv4) {
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
				.type_of_service = 0xff,
			},
		},
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr  = ch_rte_parsetype_ipv6,
		.dmask = &(const struct rte_flow_item_ipv6) {
			.hdr = {
				.src_addr =
					"\xff\xff\xff\xff\xff\xff\xff\xff"
					"\xff\xff\xff\xff\xff\xff\xff\xff",
				.dst_addr =
					"\xff\xff\xff\xff\xff\xff\xff\xff"
					"\xff\xff\xff\xff\xff\xff\xff\xff",
				.vtc_flow = RTE_BE32(0xff000000),
			},
		},
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr  = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr  = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};
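/* The dmask entries above are the PMD's default masks, used by each
 * parser when an item carries a spec but no explicit mask; e.g. the
 * IPv4 entry makes src/dst addresses and ToS exact-match by default.
 */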
static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx;
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type] &&
			    i->type != RTE_FLOW_ITEM_TYPE_VLAN)
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated (except void/vlan)");

			repeat[i->type] = 1;

			/* No spec found for this pattern item. Skip it. */
			if (!i->spec)
				break;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			idx = &flow->item_parser[i->type];
			if (!idx || !idx->fptr)
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");

			ret = idx->fptr(idx->dmask, i, &flow->fs, e);
			if (ret)
				return ret;
		}
	}

	cxgbe_fill_filter_region(adap, &flow->fs);
	cxgbe_tweak_filter_spec(adap, &flow->fs);

	return 0;
}
static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;

	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;
	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;
	return cxgbe_rtef_parse_actions(flow, item, action, e);
}
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* to destroy the filter */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}
static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	t4_os_lock(&adap->flow_lock);
	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}
static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	fs = &flow->fs;
	if (fs->mask.macidx) {
		struct port_info *pi = (struct port_info *)
				       (dev->data->dev_private);
		int ret;

		ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
		if (!ret)
			return ret;
	}

	return 0;
}
static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	int ret;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_destroy(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");
	t4_os_free(flow);
	return 0;
}
static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs = flow->f->fs;
	unsigned int fidx = flow->fidx;
	int ret;

	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
	if (ret)
		return ret;
	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}
static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/* This is a valid operation, since we are allowed to do Chelsio
	 * specific operations on the rte side of our code, but not
	 * vice-versa.
	 *
	 * So, fs can be queried/modified here BUT rte_flow_query_count
	 * cannot be worked on by the lower layer, since we want to maintain
	 * it as rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret) {
		rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
				   f, "cxgbe pmd failed to perform query");
		goto out;
	}

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;
	if (c->reset)
		cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);

out:
	t4_os_unlock(&adap->flow_lock);
	return ret;
}
static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret = 0;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (cxgbe_validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	t4_os_lock(&adap->flow_lock);
	if (cxgbe_get_fidx(flow, &fidx)) {
		ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "no memory in tcam.");
		goto out;
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "validation failed");
		goto out;
	}

out:
	t4_os_unlock(&adap->flow_lock);
	t4_os_free(flow);
	return ret;
}
/*
 * @ret : == 0 filter destroyed successfully
 *        < 0  error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	    f->private) /* We (rte_flow) created this filter */
		return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);

	return 1;
}
static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	t4_os_lock(&adap->flow_lock);
	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying TCAM "
						   "filter.");
				goto out;
			}
		}
	}

	if (is_hashfilter(adap) && adap->tids.tid_tab) {
		struct filter_entry *f;

		for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
			f = (struct filter_entry *)adap->tids.tid_tab[i];
			if (!f)
				continue;

			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying HASH "
						   "filter.");
				goto out;
			}
		}
	}

out:
	t4_os_unlock(&adap->flow_lock);
	return ret >= 0 ? 0 : ret;
}
static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate = cxgbe_flow_validate,
	.create = cxgbe_flow_create,
	.destroy = cxgbe_flow_destroy,
	.flush = cxgbe_flow_flush,
	.query = cxgbe_flow_query,
	.isolate = NULL,
};
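/* Example usage (illustrative, testpmd-style syntax): once these ops
 * are exposed through the generic filter API below, a rule such as
 *
 *	flow create 0 ingress pattern eth / ipv4 dst is 10.0.0.1 /
 *		tcp dst is 80 / end actions queue index 2 / count / end
 *
 * is parsed by cxgbe_flow_parse() and programmed into either the
 * LE-TCAM or the hash filter region, depending on the outcome of
 * cxgbe_fill_filter_region().
 */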
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}
	return ret;
}