/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include "base/common.h"
#include "cxgbe_flow.h"

#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "Redefined match item with" \
					  " different values found"); \
	(fs)->val.elem = (__v); \
	(fs)->mask.elem = (__m); \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

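/* The fill helpers above record a value/mask pair in the Chelsio filter
 * specification. __CXGBE_FILL_FS() makes the calling parser return with
 * EINVAL if the same match field is redefined with a different value;
 * __CXGBE_FILL_FS_MEMCPY() copies multi-byte fields without that check.
 */
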
static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* rte_flow specification does not allow it. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last or mask given without spec");

	/* We don't support it.
	 * Although we could accept 'last' values that are all 0's or equal
	 * to spec, that would give the user no additional functionality
	 * and would only increase the complexity for us.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last is not supported by chelsio pmd");

	return 0;
}

/**
 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
 * there are only 40 bits available to store match fields.
 * So, to save space, optimize filter spec for some common
 * known fields that hardware can parse against incoming
 * packets automatically.
 */
static void
cxgbe_tweak_filter_spec(struct adapter *adap,
			struct ch_filter_specification *fs)
{
	/* Save 16-bit ethertype field space, by setting corresponding
	 * 1-bit flags in the filter spec for common known ethertypes.
	 * When hardware sees these flags, it automatically infers and
	 * matches incoming packets against the corresponding ethertype.
	 */
	if (fs->mask.ethtype == 0xffff) {
		switch (fs->val.ethtype) {
		case RTE_ETHER_TYPE_IPV4:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV4;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_IPV6:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV6;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_VLAN:
			if (adap->params.tp.ethertype_shift < 0 &&
			    adap->params.tp.vlan_shift >= 0) {
				fs->val.ivlan_vld = 1;
				fs->mask.ivlan_vld = 1;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		default:
			break;
		}
	}
}

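/* Decide whether the filter can be placed in the hash (exact-match)
 * region: this requires fully-specified local/foreign IP addresses and
 * L4 ports, and the resulting ntuple mask must exactly match the mask
 * the hardware hash region was configured with.
 */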
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
			       tp->vlan_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}

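/* Each ch_rte_parsetype_*() handler below translates one rte_flow
 * pattern item into the Chelsio filter specification. 'dmask' is the
 * PMD's default mask for the item, used when the user supplies none.
 */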
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *umask = item->mask;
	const struct rte_flow_item_eth *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

	if (!spec)
		return 0;

	/* Chelsio hardware supports matching on only one ethertype
	 * (i.e. either the outer or inner ethertype, but not both). If
	 * we already encountered VLAN item, then ensure that the outer
	 * ethertype is VLAN (0x8100) and don't overwrite the inner
	 * ethertype stored during VLAN item parsing. Note that if
	 * 'ivlan_vld' bit is set in Chelsio filter spec, then the
	 * hardware automatically only matches packets with outer
	 * ethertype having VLAN (0x8100).
	 */
	if (fs->mask.ivlan_vld &&
	    be16_to_cpu(spec->type) != RTE_ETHER_TYPE_VLAN)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Already encountered VLAN item,"
					  " but outer ethertype is not 0x8100");

	/* We don't support SRC_MAC filtering. */
	if (!rte_is_zero_ether_addr(&mask->src))
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "src mac filtering not supported");

	if (!rte_is_zero_ether_addr(&mask->dst)) {
		const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
		const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
		struct rte_flow *flow = (struct rte_flow *)fs->private;
		struct port_info *pi = (struct port_info *)
				       (flow->dev->data->dev_private);
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, addr, m);
		if (idx <= 0)
			return rte_flow_error_set(e, idx,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unable to allocate mac"
						  " entry in h/w");
		CXGBE_FILL_FS(idx, 0x1ff, macidx);
	}

	/* Only set outer ethertype, if we didn't encounter VLAN item yet.
	 * Otherwise, the inner ethertype set by VLAN item will get
	 * overwritten.
	 */
	if (!fs->mask.ivlan_vld)
		CXGBE_FILL_FS(be16_to_cpu(spec->type),
			      be16_to_cpu(mask->type), ethtype);

	return 0;
}

static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_phy_port *val = item->spec;
	const struct rte_flow_item_phy_port *umask = item->mask;
	const struct rte_flow_item_phy_port *mask;

	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

	if (val->index > 0x7)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "port index up to 0x7 is supported");

	CXGBE_FILL_FS(val->index, mask->index, iport);

	return 0;
}

static int
ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *umask = item->mask;
	const struct rte_flow_item_vlan *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;

	CXGBE_FILL_FS(1, 1, ivlan_vld);
	if (!spec)
		return 0; /* Wildcard, match all VLAN */

	/* Chelsio hardware supports matching on only one ethertype
	 * (i.e. either the outer or inner ethertype, but not both).
	 * If outer ethertype is already set and is not VLAN (0x8100),
	 * then don't proceed further. Otherwise, reset the outer
	 * ethertype, so that it can be replaced by inner ethertype.
	 * Note that the hardware will automatically match on outer
	 * ethertype 0x8100, if 'ivlan_vld' bit is set in Chelsio
	 * filter spec.
	 */
	if (fs->mask.ethtype) {
		if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Outer ethertype not 0x8100");

		fs->val.ethtype = 0;
		fs->mask.ethtype = 0;
	}

	CXGBE_FILL_FS(be16_to_cpu(spec->tci), be16_to_cpu(mask->tci), ivlan);
	if (spec->inner_type)
		CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
			      be16_to_cpu(mask->inner_type), ethtype);

	return 0;
}

static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;

	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;

	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl/tos are not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
	     fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv4 ethertype");
	fs->type = FILTER_TYPE_IPV4;
	if (!val)
		return 0; /* ipv4 wild card */

	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	if (mask->hdr.vtc_flow ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tc/flow/hop are not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
	     fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv6 ethertype");
	fs->type = FILTER_TYPE_IPV6;
	if (!val)
		return 0; /* ipv6 wild card */

	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported !");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported.");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	if (rxq > pi->n_rx_qsets)
		return -EINVAL;

	return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;
	u8 nentries;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %d.\n", fidx);
		return -EINVAL;
	}

	nentries = cxgbe_filter_slots(adap, fs.type);
	if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	u8 nentries;

	nentries = cxgbe_filter_slots(adap, fs->type);
	if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "filter index: %d is busy.\n", fidx);
		return -EBUSY;
	}

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */

	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		     cxgbe_validate_fidxonadd(&flow->fs,
					      ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For tcam get the next available slot, if default value specified */
	if (flow->fidx == FILTER_ID_MAX) {
		u8 nentries;
		int idx;

		nentries = cxgbe_filter_slots(adap, fs->type);
		idx = cxgbe_alloc_ftid(adap, nentries);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}

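/* Return the index of the first pattern item of the given type, or
 * -ENOENT if no such item exists in the pattern.
 */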
static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
	const struct rte_flow_item *i;
	int j, index = -ENOENT;

	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
		if (i->type == type) {
			index = j;
			break;
		}
	}

	return index;
}

static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
	/* nmode:
	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
	 * BIT_2 = [src_port], BIT_3 = [dst_port]
	 *
	 * Only below cases are supported as per our spec.
	 */
	switch (nmode) {
	case 0:  /* 0000b */
		fs->nat_mode = NAT_MODE_NONE;
		break;
	case 2:  /* 0010b */
		fs->nat_mode = NAT_MODE_DIP;
		break;
	case 5:  /* 0101b */
		fs->nat_mode = NAT_MODE_SIP_SP;
		break;
	case 7:  /* 0111b */
		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
		break;
	case 10: /* 1010b */
		fs->nat_mode = NAT_MODE_DIP_DP;
		break;
	case 11: /* 1011b */
		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
		break;
	case 14: /* 1110b */
		fs->nat_mode = NAT_MODE_DIP_DP_SP;
		break;
	case 15: /* 1111b */
		fs->nat_mode = NAT_MODE_ALL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

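/* Translate a single rewrite (switch) action into the filter spec.
 * 'nmode' accumulates which NAT fields the actions rewrite (see the
 * bit layout in ch_rte_parse_nat() above); the caller maps it to a
 * hardware NAT mode once all actions have been parsed.
 */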
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  const struct rte_flow_item items[],
			  uint8_t *nmode,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_of_set_vlan_vid *vlanid;
	const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
	const struct rte_flow_action_of_push_vlan *pushvlan;
	const struct rte_flow_action_set_ipv4 *ipv4;
	const struct rte_flow_action_set_ipv6 *ipv6;
	const struct rte_flow_action_set_tp *tp_port;
	const struct rte_flow_action_phy_port *port;
	int item_index;
	u16 tmp_vlan;

	switch (a->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
			 a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xe000;
		fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
			  a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xfff;
		fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		pushvlan = (const struct rte_flow_action_of_push_vlan *)
			   a->conf;
		if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "only ethertype 0x8100 "
						  "supported for push vlan.");
		fs->newvlan = VLAN_INSERT;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		fs->newvlan = VLAN_REMOVE;
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		port = (const struct rte_flow_action_phy_port *)a->conf;
		fs->eport = port->index;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_fport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 2;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_lport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 3;
		break;
	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		fs->swapmac = 1;
		break;
	default:
		/* We are not supposed to come here */
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");
	}

	return 0;
}

static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
	uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	unsigned int abit = 0;
	int ret;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			q = (const struct rte_flow_action_queue *)a->conf;
			if (!q)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"specify rx queue index");
			if (check_rxq(flow->dev, q->index))
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"Invalid rx queue");
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = q->index;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			vlan_set_vid++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			vlan_set_pcp++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			nat_ipv4++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			nat_ipv6++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
			/* We allow multiple switch actions, but switch is
			 * not compatible with either queue or drop
			 */
			if (abit++ && fs->action != FILTER_SWITCH)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"overlapping action specified");
			if (nat_ipv4 && nat_ipv6)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"Can't have one address ipv4 and the"
						" other ipv6");

			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
							e);
			if (ret)
				return ret;
			fs->action = FILTER_SWITCH;
			break;
		default:
			/* Not supported action : return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}

	if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Both OF_SET_VLAN_VID and "
					  "OF_SET_VLAN_PCP must be specified");

	if (ch_rte_parse_nat(nmode, fs))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "invalid settings for switch action");

	return 0;
}

static struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.fptr = ch_rte_parsetype_eth,
		.dmask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
		.fptr = ch_rte_parsetype_port,
		.dmask = &(const struct rte_flow_item_phy_port){
			.index = 0x7,
		}
	},

	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.fptr = ch_rte_parsetype_vlan,
		.dmask = &(const struct rte_flow_item_vlan){
			.tci = 0xffff,
			.inner_type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr = ch_rte_parsetype_ipv4,
		.dmask = &rte_flow_item_ipv4_mask,
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr = ch_rte_parsetype_ipv6,
		.dmask = &rte_flow_item_ipv6_mask,
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};

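/* Walk the pattern items, dispatching each through parseitem[]; items
 * other than VOID may appear at most once. Once all items are parsed,
 * decide the filter region and apply the space-saving tweaks above.
 */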
static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx;
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type])
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated (except void)");
			repeat[i->type] = 1;

			/* No spec found for this pattern item. Skip it */
			if (!i->spec)
				break;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			idx = &flow->item_parser[i->type];
			if (!idx || !idx->fptr) {
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");
			}

			ret = idx->fptr(idx->dmask, i, &flow->fs, e);
			if (ret)
				return ret;
			break;
		}
	}

	cxgbe_fill_filter_region(adap, &flow->fs);
	cxgbe_tweak_filter_spec(adap, &flow->fs);

	return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;

	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;

	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;

	return cxgbe_rtef_parse_actions(flow, item, action, e);
}

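/* Program the parsed filter into hardware and wait for the firmware
 * completion. Hash filters get their index (tid) from the completion
 * context; TCAM filters keep the requested slot.
 */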
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* to destroy the filter */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	t4_os_lock(&adap->flow_lock);
	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->mask.macidx) {
		struct port_info *pi = (struct port_info *)
				       (dev->data->dev_private);
		int ret;

		ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
		if (!ret)
			return -EIO;
	}

	return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	int ret;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_destroy(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");

	t4_os_free(flow);

	return 0;
}

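/* Fetch the packet and byte hit counters for a filter. */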
static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs = flow->f->fs;
	unsigned int fidx = flow->fidx;
	int ret;

	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
	if (ret)
		return ret;

	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/*
	 * This is a valid operation, since we are allowed to do chelsio
	 * specific operations in rte side of our code but not vice-versa.
	 *
	 * So, fs can be queried/modified here BUT rte_flow_query_count
	 * cannot be worked on by the lower layer since we want to maintain
	 * it as rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret) {
		rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
				   f, "cxgbe pmd failed to perform query");
		goto out;
	}

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;
	if (c->reset)
		cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);

out:
	t4_os_unlock(&adap->flow_lock);
	return ret;
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret = 0;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (cxgbe_validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	t4_os_lock(&adap->flow_lock);
	if (cxgbe_get_fidx(flow, &fidx)) {
		ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "no memory in tcam.");
		goto out;
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "validation failed");
		goto out;
	}

out:
	t4_os_unlock(&adap->flow_lock);
	t4_os_free(flow);

	return ret;
}

/**
 * @ret : == 0 filter destroyed successfully
 *         < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	    f->private) /* We (rte_flow) created this filter */
		return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);

	return 1;
}

static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	t4_os_lock(&adap->flow_lock);
	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying TCAM "
						   "filter.");
				goto out;
			}
		}
	}

	if (is_hashfilter(adap) && adap->tids.tid_tab) {
		struct filter_entry *f;

		for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
			f = (struct filter_entry *)adap->tids.tid_tab[i];

			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying HASH "
						   "filter.");
				goto out;
			}
		}
	}

out:
	t4_os_unlock(&adap->flow_lock);
	return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate = cxgbe_flow_validate,
	.create = cxgbe_flow_create,
	.destroy = cxgbe_flow_destroy,
	.flush = cxgbe_flow_flush,
	.query = cxgbe_flow_query,
	.isolate = NULL,
};

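/* ethdev filter_ctrl entry point: hand back the rte_flow ops table
 * when queried with RTE_ETH_FILTER_GENERIC.
 */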
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}