1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
5 #include "base/common.h"
6 #include "cxgbe_flow.h"
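/* Helper macros used by the per-item parsers below: __CXGBE_FILL_FS()
 * records a value/mask pair for a scalar field of the Chelsio filter
 * specification and rejects rules that redefine the same match field
 * with a different value; __CXGBE_FILL_FS_MEMCPY() does the same for
 * array fields (e.g. IP addresses) without the conflict check.
 */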
8 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
10 if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
11 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
12 NULL, "Redefined match item with" \
13 " different values found"); \
14 (fs)->val.elem = (__v); \
15 (fs)->mask.elem = (__m); \
18 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
20 memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
21 memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
24 #define CXGBE_FILL_FS(v, m, elem) \
25 __CXGBE_FILL_FS(v, m, fs, elem, e)
27 #define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
28 __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
31 cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
33 /* The rte_flow specification does not allow mask or last without spec. */
34 if (!i->spec && (i->mask || i->last))
35 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
36 i, "last or mask given without spec");
38 * We don't support ranges via 'last'.
39 * Although we could accept 'last' values of all zeros or last == spec,
40 * that would not provide the user with any additional functionality
41 * and would only increase the complexity for us.
44 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
45 i, "last is not supported by chelsio pmd");
50 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
51 * there are only 40 bits available to store match fields.
52 * So, to save space, optimize the filter spec for some common,
53 * well-known fields that the hardware can parse against incoming
54 * packets automatically.
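 *
 * For example, when the hardware cannot match on the raw ethertype
 * field (ethertype_shift < 0), a fully-masked ethertype of 0x0800 is
 * expressed as fs->type = FILTER_TYPE_IPV4 instead, as done below.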
57 cxgbe_tweak_filter_spec(struct adapter *adap,
58 struct ch_filter_specification *fs)
60 /* Save the 16-bit ethertype field space by setting corresponding
61 * 1-bit flags in the filter spec for well-known ethertypes.
62 * When hardware sees these flags, it automatically infers and
63 * matches incoming packets against the corresponding ethertype.
65 if (fs->mask.ethtype == 0xffff) {
66 switch (fs->val.ethtype) {
67 case RTE_ETHER_TYPE_IPV4:
68 if (adap->params.tp.ethertype_shift < 0) {
69 fs->type = FILTER_TYPE_IPV4;
74 case RTE_ETHER_TYPE_IPV6:
75 if (adap->params.tp.ethertype_shift < 0) {
76 fs->type = FILTER_TYPE_IPV6;
81 case RTE_ETHER_TYPE_VLAN:
82 if (adap->params.tp.ethertype_shift < 0 &&
83 adap->params.tp.vlan_shift >= 0) {
84 fs->val.ivlan_vld = 1;
85 fs->mask.ivlan_vld = 1;
90 case RTE_ETHER_TYPE_QINQ:
91 if (adap->params.tp.ethertype_shift < 0 &&
92 adap->params.tp.vnic_shift >= 0) {
93 fs->val.ovlan_vld = 1;
94 fs->mask.ovlan_vld = 1;
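/* Decide whether the rule can be placed in the hash (exact-match)
 * filter region: build the ntuple mask implied by the rule and compare
 * it against the hash filter mask configured in hardware. Only an
 * exact match sets fs->cap; otherwise the rule stays in the TCAM
 * region.
 */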
106 cxgbe_fill_filter_region(struct adapter *adap,
107 struct ch_filter_specification *fs)
109 struct tp_params *tp = &adap->params.tp;
110 u64 hash_filter_mask = tp->hash_filter_mask;
115 if (!is_hashfilter(adap))
119 uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
120 0xff, 0xff, 0xff, 0xff,
121 0xff, 0xff, 0xff, 0xff,
122 0xff, 0xff, 0xff, 0xff};
123 uint8_t bitoff[16] = {0};
125 if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
126 !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
127 memcmp(fs->mask.lip, biton, sizeof(biton)) ||
128 memcmp(fs->mask.fip, biton, sizeof(biton)))
131 uint32_t biton = 0xffffffff;
132 uint32_t bitoff = 0x0U;
134 if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
135 !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
136 memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
137 memcmp(fs->mask.fip, &biton, sizeof(biton)))
141 if (!fs->val.lport || fs->mask.lport != 0xffff)
143 if (!fs->val.fport || fs->mask.fport != 0xffff)
146 if (tp->protocol_shift >= 0)
147 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
148 if (tp->ethertype_shift >= 0)
149 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
150 if (tp->port_shift >= 0)
151 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
152 if (tp->macmatch_shift >= 0)
153 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
154 if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
155 ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
157 if (tp->vnic_shift >= 0) {
158 if (fs->mask.ovlan_vld)
159 ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
160 fs->mask.ovlan) << tp->vnic_shift;
161 else if (fs->mask.pfvf_vld)
162 ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
164 fs->mask.vf) << tp->vnic_shift;
166 if (tp->tos_shift >= 0)
167 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
169 if (ntuple_mask != hash_filter_mask)
172 fs->cap = 1; /* use hash region */
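/* The per-item parsers below share a common convention: 'dmask' is the
 * default mask from the parseitem[] table and is used whenever the
 * rte_flow item carries no explicit mask of its own.
 */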
176 ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
177 struct ch_filter_specification *fs,
178 struct rte_flow_error *e)
180 const struct rte_flow_item_eth *spec = item->spec;
181 const struct rte_flow_item_eth *umask = item->mask;
182 const struct rte_flow_item_eth *mask;
184 /* If the user has not given any mask, then use the Chelsio-supported mask. */
185 mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
190 /* We don't support SRC_MAC filtering. */
191 if (!rte_is_zero_ether_addr(&spec->src) ||
192 (umask && !rte_is_zero_ether_addr(&umask->src)))
193 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
195 "src mac filtering not supported");
197 if (!rte_is_zero_ether_addr(&spec->dst) ||
198 (umask && !rte_is_zero_ether_addr(&umask->dst))) {
199 CXGBE_FILL_FS(0, 0x1ff, macidx);
200 CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
204 if (spec->type || (umask && umask->type))
205 CXGBE_FILL_FS(be16_to_cpu(spec->type),
206 be16_to_cpu(mask->type), ethtype);
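/* Illustrative usage (testpmd flow syntax, not part of this source):
 *   flow create 0 ingress pattern eth dst is 02:00:00:00:00:01 type is 0x0800 / end
 *     actions queue index 0 / end
 * matches the destination MAC and ethertype handled by the parser above.
 */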
212 ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
213 struct ch_filter_specification *fs,
214 struct rte_flow_error *e)
216 const struct rte_flow_item_phy_port *val = item->spec;
217 const struct rte_flow_item_phy_port *umask = item->mask;
218 const struct rte_flow_item_phy_port *mask;
220 mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
223 return 0; /* Wildcard, match all physical ports */
225 if (val->index > 0x7)
226 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
228 "port index up to 0x7 is supported");
230 if (val->index || (umask && umask->index))
231 CXGBE_FILL_FS(val->index, mask->index, iport);
237 ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
238 struct ch_filter_specification *fs,
239 struct rte_flow_error *e)
241 const struct rte_flow_item_vlan *spec = item->spec;
242 const struct rte_flow_item_vlan *umask = item->mask;
243 const struct rte_flow_item_vlan *mask;
245 /* If the user has not given any mask, then use the Chelsio-supported mask. */
246 mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
248 if (!fs->mask.ethtype)
249 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
251 "Can't parse VLAN item without knowing ethertype");
253 /* If ethertype is already set and is not VLAN (0x8100) or
254 * QINQ (0x88A8), then don't proceed further. Otherwise,
255 * reset the outer ethertype, so that it can be replaced by
256 * innermost ethertype. Note that hardware will automatically
257 * match against VLAN or QINQ packets, based on 'ivlan_vld' or
258 * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
260 if (fs->mask.ethtype) {
261 if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
262 fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
263 return rte_flow_error_set(e, EINVAL,
264 RTE_FLOW_ERROR_TYPE_ITEM,
266 "Ethertype must be 0x8100 or 0x88a8");
269 if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
270 CXGBE_FILL_FS(1, 1, ovlan_vld);
272 if (spec->tci || (umask && umask->tci))
273 CXGBE_FILL_FS(be16_to_cpu(spec->tci),
274 be16_to_cpu(mask->tci), ovlan);
275 fs->mask.ethtype = 0;
278 } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
279 CXGBE_FILL_FS(1, 1, ivlan_vld);
281 if (spec->tci || (umask && umask->tci))
282 CXGBE_FILL_FS(be16_to_cpu(spec->tci),
283 be16_to_cpu(mask->tci), ivlan);
284 fs->mask.ethtype = 0;
289 if (spec && (spec->inner_type || (umask && umask->inner_type)))
290 CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
291 be16_to_cpu(mask->inner_type), ethtype);
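/* Note: for QinQ (outer ethertype 0x88a8) the TCI is recorded in the
 * ovlan fields, while plain VLAN (0x8100) uses the ivlan fields; in
 * both cases the saved ethertype match is cleared so that the inner
 * ethertype carried by the VLAN item can take its place.
 */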
297 ch_rte_parsetype_pf(const void *dmask __rte_unused,
298 const struct rte_flow_item *item __rte_unused,
299 struct ch_filter_specification *fs,
300 struct rte_flow_error *e __rte_unused)
302 struct rte_flow *flow = (struct rte_flow *)fs->private;
303 struct rte_eth_dev *dev = flow->dev;
304 struct adapter *adap = ethdev2adap(dev);
306 CXGBE_FILL_FS(1, 1, pfvf_vld);
308 CXGBE_FILL_FS(adap->pf, 0x7, pf);
313 ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
314 struct ch_filter_specification *fs,
315 struct rte_flow_error *e)
317 const struct rte_flow_item_vf *umask = item->mask;
318 const struct rte_flow_item_vf *val = item->spec;
319 const struct rte_flow_item_vf *mask;
321 /* If the user has not given any mask, then use the Chelsio-supported mask. */
322 mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
324 CXGBE_FILL_FS(1, 1, pfvf_vld);
327 return 0; /* Wildcard, match all VFs */
329 if (val->id > UCHAR_MAX)
330 return rte_flow_error_set(e, EINVAL,
331 RTE_FLOW_ERROR_TYPE_ITEM,
335 if (val->id || (umask && umask->id))
336 CXGBE_FILL_FS(val->id, mask->id, vf);
342 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
343 struct ch_filter_specification *fs,
344 struct rte_flow_error *e)
346 const struct rte_flow_item_udp *val = item->spec;
347 const struct rte_flow_item_udp *umask = item->mask;
348 const struct rte_flow_item_udp *mask;
350 mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
352 if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
353 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
355 "udp: only src/dst port supported");
357 CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
361 if (val->hdr.src_port || (umask && umask->hdr.src_port))
362 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
363 be16_to_cpu(mask->hdr.src_port), fport);
365 if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
366 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
367 be16_to_cpu(mask->hdr.dst_port), lport);
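/* In the Chelsio filter spec the source port maps to 'fport' (foreign,
 * i.e. remote) and the destination port to 'lport' (local); the TCP
 * parser below and the fip/lip address fields follow the same scheme.
 */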
373 ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
374 struct ch_filter_specification *fs,
375 struct rte_flow_error *e)
377 const struct rte_flow_item_tcp *val = item->spec;
378 const struct rte_flow_item_tcp *umask = item->mask;
379 const struct rte_flow_item_tcp *mask;
381 mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
383 if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
384 mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
386 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
388 "tcp: only src/dst port supported");
390 CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
394 if (val->hdr.src_port || (umask && umask->hdr.src_port))
395 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
396 be16_to_cpu(mask->hdr.src_port), fport);
398 if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
399 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
400 be16_to_cpu(mask->hdr.dst_port), lport);
406 ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
407 struct ch_filter_specification *fs,
408 struct rte_flow_error *e)
410 const struct rte_flow_item_ipv4 *val = item->spec;
411 const struct rte_flow_item_ipv4 *umask = item->mask;
412 const struct rte_flow_item_ipv4 *mask;
414 mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
416 if (mask->hdr.time_to_live)
417 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
418 item, "ttl is not supported");
420 if (fs->mask.ethtype &&
421 (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
422 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
424 "Couldn't find IPv4 ethertype");
425 fs->type = FILTER_TYPE_IPV4;
427 return 0; /* ipv4 wild card */
429 if (val->hdr.next_proto_id || (umask && umask->hdr.next_proto_id))
430 CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id,
433 if (val->hdr.dst_addr || (umask && umask->hdr.dst_addr))
434 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
437 if (val->hdr.src_addr || (umask && umask->hdr.src_addr))
438 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
441 if (val->hdr.type_of_service || (umask && umask->hdr.type_of_service))
442 CXGBE_FILL_FS(val->hdr.type_of_service,
443 mask->hdr.type_of_service, tos);
449 ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
450 struct ch_filter_specification *fs,
451 struct rte_flow_error *e)
453 const struct rte_flow_item_ipv6 *val = item->spec;
454 const struct rte_flow_item_ipv6 *umask = item->mask;
455 const struct rte_flow_item_ipv6 *mask;
456 u32 vtc_flow, vtc_flow_mask;
459 mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
461 vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
463 if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
464 mask->hdr.payload_len || mask->hdr.hop_limits)
465 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
467 "flow/hop are not supported");
469 if (fs->mask.ethtype &&
470 (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
471 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
473 "Couldn't find IPv6 ethertype");
474 fs->type = FILTER_TYPE_IPV6;
476 return 0; /* ipv6 wild card */
478 if (val->hdr.proto || (umask && umask->hdr.proto))
479 CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
481 vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
482 if (val->hdr.vtc_flow || (umask && umask->hdr.vtc_flow))
483 CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
484 RTE_IPV6_HDR_TC_SHIFT,
485 (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
486 RTE_IPV6_HDR_TC_SHIFT,
489 if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
491 memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
492 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
495 if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
497 memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
498 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
505 cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
506 struct rte_flow_error *e)
509 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
510 attr, "attribute:<egress> is"
513 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
514 attr, "group parameter is"
517 flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
522 static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
524 struct port_info *pi = ethdev2pinfo(dev);
526 if (rxq > pi->n_rx_qsets)
531 static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
533 struct adapter *adap = ethdev2adap(f->dev);
534 struct ch_filter_specification fs = f->fs;
537 if (fidx >= adap->tids.nftids) {
538 dev_err(adap, "invalid flow index %d.\n", fidx);
542 nentries = cxgbe_filter_slots(adap, fs.type);
543 if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
544 dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
552 cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
553 struct adapter *adap, unsigned int fidx)
557 nentries = cxgbe_filter_slots(adap, fs->type);
558 if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
559 dev_err(adap, "filter index: %d is busy.\n", fidx);
563 if (fidx >= adap->tids.nftids) {
564 dev_err(adap, "filter index (%u) >= max(%u)\n",
565 fidx, adap->tids.nftids);
573 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
576 return 0; /* Hash filters */
577 return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
578 cxgbe_validate_fidxonadd(&flow->fs,
579 ethdev2adap(flow->dev), fidx);
582 static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
584 struct ch_filter_specification *fs = &flow->fs;
585 struct adapter *adap = ethdev2adap(flow->dev);
587 /* For TCAM, get the next available slot if the default value was specified */
588 if (flow->fidx == FILTER_ID_MAX) {
592 nentries = cxgbe_filter_slots(adap, fs->type);
593 idx = cxgbe_alloc_ftid(adap, nentries);
595 dev_err(adap, "unable to get a filter index in tcam\n");
598 *fidx = (unsigned int)idx;
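/* Return the positional index of the first item of the given type in
 * the pattern, or -ENOENT if no such item is present.
 */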
607 cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
609 const struct rte_flow_item *i;
610 int j, index = -ENOENT;
612 for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
613 if (i->type == type) {
623 ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
626 * BIT_0 = [src_ip], BIT_1 = [dst_ip]
627 * BIT_2 = [src_port], BIT_3 = [dst_port]
629 * Only the cases below are supported, as per our spec.
633 fs->nat_mode = NAT_MODE_NONE;
636 fs->nat_mode = NAT_MODE_DIP;
639 fs->nat_mode = NAT_MODE_SIP_SP;
642 fs->nat_mode = NAT_MODE_DIP_SIP_SP;
645 fs->nat_mode = NAT_MODE_DIP_DP;
648 fs->nat_mode = NAT_MODE_DIP_DP_SIP;
651 fs->nat_mode = NAT_MODE_DIP_DP_SP;
654 fs->nat_mode = NAT_MODE_ALL;
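/* Illustrative mapping of the nmode bitmap above: rewriting only the
 * destination IP and destination port (BIT_1 | BIT_3 = 0xa) selects
 * NAT_MODE_DIP_DP; combinations not listed here are rejected by the
 * caller.
 */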
664 ch_rte_parse_atype_switch(const struct rte_flow_action *a,
665 const struct rte_flow_item items[],
667 struct ch_filter_specification *fs,
668 struct rte_flow_error *e)
670 const struct rte_flow_action_of_set_vlan_vid *vlanid;
671 const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
672 const struct rte_flow_action_of_push_vlan *pushvlan;
673 const struct rte_flow_action_set_ipv4 *ipv4;
674 const struct rte_flow_action_set_ipv6 *ipv6;
675 const struct rte_flow_action_set_tp *tp_port;
676 const struct rte_flow_action_phy_port *port;
677 const struct rte_flow_action_set_mac *mac;
682 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
683 vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
685 /* If explicitly asked to push a new VLAN header,
686 * then don't set rewrite mode. Otherwise, the
687 * incoming VLAN packets will get their VLAN fields
688 * rewritten, instead of adding an additional outer
691 if (fs->newvlan != VLAN_INSERT)
692 fs->newvlan = VLAN_REWRITE;
693 tmp_vlan = fs->vlan & 0xe000;
694 fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
696 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
697 vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
699 /* If explicitly asked to push a new VLAN header,
700 * then don't set rewrite mode. Otherwise, the
701 * incoming VLAN packets will get their VLAN fields
702 * rewritten, instead of adding an additional outer
705 if (fs->newvlan != VLAN_INSERT)
706 fs->newvlan = VLAN_REWRITE;
707 tmp_vlan = fs->vlan & 0xfff;
708 fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
710 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
711 pushvlan = (const struct rte_flow_action_of_push_vlan *)
713 if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
714 return rte_flow_error_set(e, EINVAL,
715 RTE_FLOW_ERROR_TYPE_ACTION, a,
716 "only ethertype 0x8100 "
717 "supported for push vlan.");
718 fs->newvlan = VLAN_INSERT;
720 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
721 fs->newvlan = VLAN_REMOVE;
723 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
724 port = (const struct rte_flow_action_phy_port *)a->conf;
725 fs->eport = port->index;
727 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
728 item_index = cxgbe_get_flow_item_index(items,
729 RTE_FLOW_ITEM_TYPE_IPV4);
731 return rte_flow_error_set(e, EINVAL,
732 RTE_FLOW_ERROR_TYPE_ACTION, a,
733 "No RTE_FLOW_ITEM_TYPE_IPV4 "
736 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
737 memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
740 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
741 item_index = cxgbe_get_flow_item_index(items,
742 RTE_FLOW_ITEM_TYPE_IPV4);
744 return rte_flow_error_set(e, EINVAL,
745 RTE_FLOW_ERROR_TYPE_ACTION, a,
746 "No RTE_FLOW_ITEM_TYPE_IPV4 "
749 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
750 memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
753 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
754 item_index = cxgbe_get_flow_item_index(items,
755 RTE_FLOW_ITEM_TYPE_IPV6);
757 return rte_flow_error_set(e, EINVAL,
758 RTE_FLOW_ERROR_TYPE_ACTION, a,
759 "No RTE_FLOW_ITEM_TYPE_IPV6 "
762 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
763 memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
766 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
767 item_index = cxgbe_get_flow_item_index(items,
768 RTE_FLOW_ITEM_TYPE_IPV6);
770 return rte_flow_error_set(e, EINVAL,
771 RTE_FLOW_ERROR_TYPE_ACTION, a,
772 "No RTE_FLOW_ITEM_TYPE_IPV6 "
775 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
776 memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
779 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
780 item_index = cxgbe_get_flow_item_index(items,
781 RTE_FLOW_ITEM_TYPE_TCP);
782 if (item_index < 0) {
784 cxgbe_get_flow_item_index(items,
785 RTE_FLOW_ITEM_TYPE_UDP);
787 return rte_flow_error_set(e, EINVAL,
788 RTE_FLOW_ERROR_TYPE_ACTION, a,
789 "No RTE_FLOW_ITEM_TYPE_TCP or "
790 "RTE_FLOW_ITEM_TYPE_UDP found");
793 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
794 fs->nat_fport = be16_to_cpu(tp_port->port);
797 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
798 item_index = cxgbe_get_flow_item_index(items,
799 RTE_FLOW_ITEM_TYPE_TCP);
800 if (item_index < 0) {
802 cxgbe_get_flow_item_index(items,
803 RTE_FLOW_ITEM_TYPE_UDP);
805 return rte_flow_error_set(e, EINVAL,
806 RTE_FLOW_ERROR_TYPE_ACTION, a,
807 "No RTE_FLOW_ITEM_TYPE_TCP or "
808 "RTE_FLOW_ITEM_TYPE_UDP found");
811 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
812 fs->nat_lport = be16_to_cpu(tp_port->port);
815 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
816 item_index = cxgbe_get_flow_item_index(items,
817 RTE_FLOW_ITEM_TYPE_ETH);
819 return rte_flow_error_set(e, EINVAL,
820 RTE_FLOW_ERROR_TYPE_ACTION, a,
821 "No RTE_FLOW_ITEM_TYPE_ETH "
825 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
826 item_index = cxgbe_get_flow_item_index(items,
827 RTE_FLOW_ITEM_TYPE_ETH);
829 return rte_flow_error_set(e, EINVAL,
830 RTE_FLOW_ERROR_TYPE_ACTION, a,
831 "No RTE_FLOW_ITEM_TYPE_ETH "
833 mac = (const struct rte_flow_action_set_mac *)a->conf;
836 memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
838 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
839 item_index = cxgbe_get_flow_item_index(items,
840 RTE_FLOW_ITEM_TYPE_ETH);
842 return rte_flow_error_set(e, EINVAL,
843 RTE_FLOW_ERROR_TYPE_ACTION, a,
844 "No RTE_FLOW_ITEM_TYPE_ETH found");
845 mac = (const struct rte_flow_action_set_mac *)a->conf;
848 memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
851 /* We are not supposed to come here */
852 return rte_flow_error_set(e, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ACTION, a,
854 "Action not supported");
861 cxgbe_rtef_parse_actions(struct rte_flow *flow,
862 const struct rte_flow_item items[],
863 const struct rte_flow_action action[],
864 struct rte_flow_error *e)
866 struct ch_filter_specification *fs = &flow->fs;
867 uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
868 uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
869 const struct rte_flow_action_queue *q;
870 const struct rte_flow_action *a;
874 for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
876 case RTE_FLOW_ACTION_TYPE_VOID:
878 case RTE_FLOW_ACTION_TYPE_DROP:
880 return rte_flow_error_set(e, EINVAL,
881 RTE_FLOW_ERROR_TYPE_ACTION, a,
882 "specify only 1 pass/drop");
883 fs->action = FILTER_DROP;
885 case RTE_FLOW_ACTION_TYPE_QUEUE:
886 q = (const struct rte_flow_action_queue *)a->conf;
888 return rte_flow_error_set(e, EINVAL,
889 RTE_FLOW_ERROR_TYPE_ACTION, q,
890 "specify rx queue index");
891 if (check_rxq(flow->dev, q->index))
892 return rte_flow_error_set(e, EINVAL,
893 RTE_FLOW_ERROR_TYPE_ACTION, q,
896 return rte_flow_error_set(e, EINVAL,
897 RTE_FLOW_ERROR_TYPE_ACTION, a,
898 "specify only 1 pass/drop");
899 fs->action = FILTER_PASS;
903 case RTE_FLOW_ACTION_TYPE_COUNT:
906 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
909 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
912 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
913 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
914 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
915 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
916 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
917 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
920 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
921 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
924 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
925 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
926 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
927 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
929 /* We allow multiple switch actions, but switch is
930 * not compatible with either queue or drop
932 if (abit++ && fs->action != FILTER_SWITCH)
933 return rte_flow_error_set(e, EINVAL,
934 RTE_FLOW_ERROR_TYPE_ACTION, a,
935 "overlapping action specified");
936 if (nat_ipv4 && nat_ipv6)
937 return rte_flow_error_set(e, EINVAL,
938 RTE_FLOW_ERROR_TYPE_ACTION, a,
939 "Can't have one address ipv4 and the"
942 ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
946 fs->action = FILTER_SWITCH;
949 /* Unsupported action: return error */
950 return rte_flow_error_set(e, ENOTSUP,
951 RTE_FLOW_ERROR_TYPE_ACTION,
952 a, "Action not supported");
956 if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
957 return rte_flow_error_set(e, EINVAL,
958 RTE_FLOW_ERROR_TYPE_ACTION, a,
959 "Both OF_SET_VLAN_VID and "
960 "OF_SET_VLAN_PCP must be specified");
962 if (ch_rte_parse_nat(nmode, fs))
963 return rte_flow_error_set(e, EINVAL,
964 RTE_FLOW_ERROR_TYPE_ACTION, a,
965 "invalid settings for swich action");
969 static struct chrte_fparse parseitem[] = {
970 [RTE_FLOW_ITEM_TYPE_ETH] = {
971 .fptr = ch_rte_parsetype_eth,
972 .dmask = &(const struct rte_flow_item_eth){
973 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
974 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
979 [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
980 .fptr = ch_rte_parsetype_port,
981 .dmask = &(const struct rte_flow_item_phy_port){
986 [RTE_FLOW_ITEM_TYPE_VLAN] = {
987 .fptr = ch_rte_parsetype_vlan,
988 .dmask = &(const struct rte_flow_item_vlan){
990 .inner_type = 0xffff,
994 [RTE_FLOW_ITEM_TYPE_IPV4] = {
995 .fptr = ch_rte_parsetype_ipv4,
996 .dmask = &(const struct rte_flow_item_ipv4) {
998 .src_addr = RTE_BE32(0xffffffff),
999 .dst_addr = RTE_BE32(0xffffffff),
1000 .type_of_service = 0xff,
1005 [RTE_FLOW_ITEM_TYPE_IPV6] = {
1006 .fptr = ch_rte_parsetype_ipv6,
1007 .dmask = &(const struct rte_flow_item_ipv6) {
1010 "\xff\xff\xff\xff\xff\xff\xff\xff"
1011 "\xff\xff\xff\xff\xff\xff\xff\xff",
1013 "\xff\xff\xff\xff\xff\xff\xff\xff"
1014 "\xff\xff\xff\xff\xff\xff\xff\xff",
1015 .vtc_flow = RTE_BE32(0xff000000),
1020 [RTE_FLOW_ITEM_TYPE_UDP] = {
1021 .fptr = ch_rte_parsetype_udp,
1022 .dmask = &rte_flow_item_udp_mask,
1025 [RTE_FLOW_ITEM_TYPE_TCP] = {
1026 .fptr = ch_rte_parsetype_tcp,
1027 .dmask = &rte_flow_item_tcp_mask,
1030 [RTE_FLOW_ITEM_TYPE_PF] = {
1031 .fptr = ch_rte_parsetype_pf,
1035 [RTE_FLOW_ITEM_TYPE_VF] = {
1036 .fptr = ch_rte_parsetype_vf,
1037 .dmask = &(const struct rte_flow_item_vf){
1044 cxgbe_rtef_parse_items(struct rte_flow *flow,
1045 const struct rte_flow_item items[],
1046 struct rte_flow_error *e)
1048 struct adapter *adap = ethdev2adap(flow->dev);
1049 const struct rte_flow_item *i;
1050 char repeat[ARRAY_SIZE(parseitem)] = {0};
1052 for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
1053 struct chrte_fparse *idx;
1056 if (i->type >= ARRAY_SIZE(parseitem))
1057 return rte_flow_error_set(e, ENOTSUP,
1058 RTE_FLOW_ERROR_TYPE_ITEM,
1059 i, "Item not supported");
1062 case RTE_FLOW_ITEM_TYPE_VOID:
1065 /* check if item is repeated */
1066 if (repeat[i->type] &&
1067 i->type != RTE_FLOW_ITEM_TYPE_VLAN)
1068 return rte_flow_error_set(e, ENOTSUP,
1069 RTE_FLOW_ERROR_TYPE_ITEM, i,
1070 "parse items cannot be repeated(except void/vlan)");
1072 repeat[i->type] = 1;
1074 /* validate the item */
1075 ret = cxgbe_validate_item(i, e);
1079 idx = &flow->item_parser[i->type];
1080 if (!idx || !idx->fptr) {
1081 return rte_flow_error_set(e, ENOTSUP,
1082 RTE_FLOW_ERROR_TYPE_ITEM, i,
1083 "Item not supported");
1085 ret = idx->fptr(idx->dmask, i, &flow->fs, e);
1092 cxgbe_tweak_filter_spec(adap, &flow->fs);
1093 cxgbe_fill_filter_region(adap, &flow->fs);
1099 cxgbe_flow_parse(struct rte_flow *flow,
1100 const struct rte_flow_attr *attr,
1101 const struct rte_flow_item item[],
1102 const struct rte_flow_action action[],
1103 struct rte_flow_error *e)
1106 /* parse user request into ch_filter_specification */
1107 ret = cxgbe_rtef_parse_attr(flow, attr, e);
1110 ret = cxgbe_rtef_parse_items(flow, item, e);
1113 return cxgbe_rtef_parse_actions(flow, item, action, e);
1116 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
1118 struct ch_filter_specification *fs = &flow->fs;
1119 struct adapter *adap = ethdev2adap(dev);
1120 struct tid_info *t = &adap->tids;
1121 struct filter_ctx ctx;
1125 if (cxgbe_get_fidx(flow, &fidx))
1127 if (cxgbe_verify_fidx(flow, fidx, 0))
1130 t4_init_completion(&ctx.completion);
1131 /* go create the filter */
1132 err = cxgbe_set_filter(dev, fidx, fs, &ctx);
1134 dev_err(adap, "Error %d while creating filter.\n", err);
1138 /* Poll the FW for reply */
1139 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1141 CXGBE_FLOW_POLL_CNT,
1144 dev_err(adap, "Filter set operation timed out (%d)\n", err);
1148 dev_err(adap, "Hardware error %d while creating the filter.\n",
1153 if (fs->cap) { /* to destroy the filter */
1154 flow->fidx = ctx.tid;
1155 flow->f = lookup_tid(t, ctx.tid);
1158 flow->f = &adap->tids.ftid_tab[fidx];
1164 static struct rte_flow *
1165 cxgbe_flow_create(struct rte_eth_dev *dev,
1166 const struct rte_flow_attr *attr,
1167 const struct rte_flow_item item[],
1168 const struct rte_flow_action action[],
1169 struct rte_flow_error *e)
1171 struct adapter *adap = ethdev2adap(dev);
1172 struct rte_flow *flow;
1175 flow = t4_os_alloc(sizeof(struct rte_flow));
1177 rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1178 NULL, "Unable to allocate memory for"
1183 flow->item_parser = parseitem;
1185 flow->fs.private = (void *)flow;
1187 if (cxgbe_flow_parse(flow, attr, item, action, e)) {
1192 t4_os_lock(&adap->flow_lock);
1193 /* Go interact with the cxgbe_filter layer */
1194 ret = __cxgbe_flow_create(dev, flow);
1195 t4_os_unlock(&adap->flow_lock);
1197 rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1198 NULL, "Unable to create flow rule");
1203 flow->f->private = flow; /* Will be used during flush */
1208 static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1210 struct adapter *adap = ethdev2adap(dev);
1211 struct filter_entry *f = flow->f;
1212 struct ch_filter_specification *fs;
1213 struct filter_ctx ctx;
1217 if (cxgbe_verify_fidx(flow, flow->fidx, 1))
1220 t4_init_completion(&ctx.completion);
1221 err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
1223 dev_err(adap, "Error %d while deleting filter.\n", err);
1227 /* Poll the FW for reply */
1228 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1230 CXGBE_FLOW_POLL_CNT,
1233 dev_err(adap, "Filter delete operation timed out (%d)\n", err);
1237 dev_err(adap, "Hardware error %d while deleting the filter.\n",
1246 cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1247 struct rte_flow_error *e)
1249 struct adapter *adap = ethdev2adap(dev);
1252 t4_os_lock(&adap->flow_lock);
1253 ret = __cxgbe_flow_destroy(dev, flow);
1254 t4_os_unlock(&adap->flow_lock);
1256 return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1257 flow, "error destroying filter.");
1262 static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
1265 struct adapter *adap = ethdev2adap(flow->dev);
1266 struct ch_filter_specification fs = flow->f->fs;
1267 unsigned int fidx = flow->fidx;
1270 ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
1273 return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
1277 cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1278 const struct rte_flow_action *action, void *data,
1279 struct rte_flow_error *e)
1281 struct adapter *adap = ethdev2adap(flow->dev);
1282 struct ch_filter_specification fs;
1283 struct rte_flow_query_count *c;
1284 struct filter_entry *f;
1292 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1293 return rte_flow_error_set(e, ENOTSUP,
1294 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1295 "only count supported for query");
1298 * This is a valid operation, since we are allowed to do Chelsio-
1299 * specific operations on the rte side of our code, but not vice versa.
1301 * So, fs can be queried/modified here, BUT rte_flow_query_count
1302 * cannot be touched by the lower layer, since we want to keep
1303 * it rte_flow agnostic.
1306 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1307 &fs, "filter hit counters were not"
1308 " enabled during filter creation");
1310 c = (struct rte_flow_query_count *)data;
1312 t4_os_lock(&adap->flow_lock);
1313 ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
1315 rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
1316 f, "cxgbe pmd failed to perform query");
1320 /* Query was successful */
1324 cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
1327 t4_os_unlock(&adap->flow_lock);
1332 cxgbe_flow_validate(struct rte_eth_dev *dev,
1333 const struct rte_flow_attr *attr,
1334 const struct rte_flow_item item[],
1335 const struct rte_flow_action action[],
1336 struct rte_flow_error *e)
1338 struct adapter *adap = ethdev2adap(dev);
1339 struct rte_flow *flow;
1343 flow = t4_os_alloc(sizeof(struct rte_flow));
1345 return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1347 "Unable to allocate memory for filter_entry");
1349 flow->item_parser = parseitem;
1351 flow->fs.private = (void *)flow;
1353 ret = cxgbe_flow_parse(flow, attr, item, action, e);
1359 if (cxgbe_validate_filter(adap, &flow->fs)) {
1361 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1363 "validation failed. Check f/w config file.");
1366 t4_os_lock(&adap->flow_lock);
1367 if (cxgbe_get_fidx(flow, &fidx)) {
1368 ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1369 NULL, "no memory in tcam.");
1373 if (cxgbe_verify_fidx(flow, fidx, 0)) {
1374 ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1375 NULL, "validation failed");
1380 t4_os_unlock(&adap->flow_lock);
1386 * @ret : == 0 filter destroyed successfully
1387 * < 0 error destroying filter
1388 * == 1 filter not active / not found
1391 cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
1393 if (f && (f->valid || f->pending) &&
1394 f->dev == dev && /* Only if user has asked for this port */
1395 f->private) /* We (rte_flow) created this filter */
1396 return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
1400 static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
1402 struct adapter *adap = ethdev2adap(dev);
1406 t4_os_lock(&adap->flow_lock);
1407 if (adap->tids.ftid_tab) {
1408 struct filter_entry *f = &adap->tids.ftid_tab[0];
1410 for (i = 0; i < adap->tids.nftids; i++, f++) {
1411 ret = cxgbe_check_n_destroy(f, dev);
1413 rte_flow_error_set(e, ret,
1414 RTE_FLOW_ERROR_TYPE_HANDLE,
1416 "error destroying TCAM "
1423 if (is_hashfilter(adap) && adap->tids.tid_tab) {
1424 struct filter_entry *f;
1426 for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
1427 f = (struct filter_entry *)adap->tids.tid_tab[i];
1429 ret = cxgbe_check_n_destroy(f, dev);
1431 rte_flow_error_set(e, ret,
1432 RTE_FLOW_ERROR_TYPE_HANDLE,
1434 "error destroying HASH "
1442 t4_os_unlock(&adap->flow_lock);
1443 return ret >= 0 ? 0 : ret;
1446 static const struct rte_flow_ops cxgbe_flow_ops = {
1447 .validate = cxgbe_flow_validate,
1448 .create = cxgbe_flow_create,
1449 .destroy = cxgbe_flow_destroy,
1450 .flush = cxgbe_flow_flush,
1451 .query = cxgbe_flow_query,
1456 cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
1457 enum rte_filter_type filter_type,
1458 enum rte_filter_op filter_op,
1464 switch (filter_type) {
1465 case RTE_ETH_FILTER_GENERIC:
1466 if (filter_op != RTE_ETH_FILTER_GET)
1468 *(const void **)arg = &cxgbe_flow_ops;