/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 */

#include "cxgbe_flow.h"

#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if (!((fs)->val.elem || (fs)->mask.elem)) { \
		(fs)->val.elem = (__v); \
		(fs)->mask.elem = (__m); \
	} else { \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "a filter can be specified" \
					  " only once"); \
	} \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

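/*
 * Sanity-check a single pattern item before the type-specific parser
 * runs: "mask" or "last" without "spec" is invalid per the rte_flow
 * specification, and range matching via "last" is not supported by
 * this PMD at all.
 */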
static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* The rte_flow specification does not allow mask or last without spec. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last or mask given without spec");

	/*
	 * We don't support range matching via "last".
	 * We could accept "last" as all-zeroes or as last == spec, but that
	 * would add complexity for us without giving the user any additional
	 * functionality.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  i, "last is not supported by chelsio pmd");

	return 0;
}

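/*
 * Decide whether the filter can be placed in the hash (exact-match)
 * region. Only fully-masked 4-tuples whose combined ntuple mask equals
 * the hash filter mask programmed in hardware qualify; otherwise
 * fs->cap stays 0 and the filter goes to the TCAM region.
 */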
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}

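/*
 * Parse a UDP pattern item. Only source/destination port matching is
 * supported; the IP protocol is pinned to IPPROTO_UDP.
 */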
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);
	return 0;
}

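/*
 * Parse a TCP pattern item. Only source/destination port matching is
 * supported; the IP protocol is pinned to IPPROTO_TCP.
 */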
static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
		      be16_to_cpu(mask->hdr.src_port), fport);
	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
		      be16_to_cpu(mask->hdr.dst_port), lport);
	return 0;
}

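/*
 * Parse an IPv4 pattern item. TTL and TOS matching is not supported.
 * Without a spec, the item acts as an IPv4 wild card that only sets
 * the ethertype.
 */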
static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl/tos are not supported");

	fs->type = FILTER_TYPE_IPV4;
	CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv4 wild card */

	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

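/*
 * Parse an IPv6 pattern item. Traffic class, flow label and hop limit
 * matching is not supported. Without a spec, the item acts as an IPv6
 * wild card that only sets the ethertype.
 */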
static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	if (mask->hdr.vtc_flow ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tc/flow/hop are not supported");

	fs->type = FILTER_TYPE_IPV6;
	CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
	if (!val)
		return 0; /* ipv6 wild card */

	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

	return 0;
}

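/*
 * Parse the flow attributes: egress rules and non-zero groups are
 * rejected, and the rule priority is mapped to a filter index
 * (priority 0 lets the driver pick a free slot later).
 */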
static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	if (rxq > pi->n_rx_qsets)
		return -EINVAL;
	return 0;
}

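/*
 * The next two helpers validate a requested filter index: on delete
 * the index must be in range and refer to a filter that is currently
 * set, on add it must be in range and free.
 */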
static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %d.\n", fidx);
		return -EINVAL;
	}

	if (!is_filter_set(&adap->tids, fidx, fs.type)) {
		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	if (is_filter_set(&adap->tids, fidx, fs->type)) {
		dev_err(adap, "filter index: %d is busy.\n", fidx);
		return -EBUSY;
	}

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */
	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		     cxgbe_validate_fidxonadd(&flow->fs,
					      ethdev2adap(flow->dev), fidx);
}

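/*
 * Resolve the filter index to use for a TCAM filter: either the slot
 * derived from the rule priority or, if none was requested, the next
 * free slot in the TCAM.
 */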
static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For a TCAM filter, get the next available slot if no index was requested. */
	if (flow->fidx == FILTER_ID_MAX) {
		int idx;

		idx = cxgbe_alloc_ftid(adap, fs->type);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}

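/*
 * Parse the action list. Only one fate action (DROP or QUEUE) may be
 * specified; COUNT may be added to enable hit counters for later
 * rte_flow_query() calls.
 */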
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	char abit = 0;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			q = (const struct rte_flow_action_queue *)a->conf;
			if (!q)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"specify rx queue index");
			if (check_rxq(flow->dev, q->index))
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"Invalid rx queue");
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = q->index;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		default:
			/* Unsupported action: return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}

	return 0;
}

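/*
 * Per-item-type parser table, indexed by RTE_FLOW_ITEM_TYPE_*. Each
 * entry provides the parse callback and the default mask to use when
 * the application does not supply one.
 */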
struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr = ch_rte_parsetype_ipv4,
		.dmask = &rte_flow_item_ipv4_mask,
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr = ch_rte_parsetype_ipv6,
		.dmask = &rte_flow_item_ipv6_mask,
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};

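/*
 * Walk the pattern items, rejecting unsupported and repeated items,
 * and let the per-type parsers fill in the filter specification.
 * Finally decide which filter region (hash or TCAM) the filter
 * belongs to.
 */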
static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx = &flow->item_parser[i->type];
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type])
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated (except void)");
			repeat[i->type] = 1;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			if (!idx || !idx->fptr) {
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");
			} else {
				ret = idx->fptr(idx->dmask, i, &flow->fs, e);
				if (ret)
					return ret;
			}
		}
	}

	cxgbe_fill_filter_region(adap, &flow->fs);

	return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;

	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;
	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;
	return cxgbe_rtef_parse_actions(flow, action, e);
}

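/*
 * Program the parsed filter into the hardware and wait for the
 * firmware reply. On success, remember the filter entry and its index
 * so the flow can later be queried or destroyed.
 */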
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_US,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* needed to destroy the filter later */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}

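/*
 * rte_flow "create" callback. For illustration only (not part of the
 * driver), an application would typically reach this code through
 * rte_flow_create(), e.g. to steer TCP traffic with destination port
 * 80 to rx queue 0:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80) };
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.dst_port = rte_cpu_to_be_16(0xffff) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */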
static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}

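/*
 * Remove the filter behind a flow from the hardware and wait for the
 * firmware to confirm the deletion.
 */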
static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_US,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	int ret;

	ret = __cxgbe_flow_destroy(dev, flow);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");
	t4_os_free(flow);
	return 0;
}

static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs = flow->f->fs;
	unsigned int fidx = flow->fidx;
	int ret = 0;

	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
	if (ret)
		return ret;
	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

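/*
 * rte_flow "query" callback. Only the COUNT action can be queried,
 * and only if hit counters were enabled when the filter was created.
 */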
static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/*
	 * This is a valid operation: the rte_flow side of our code is
	 * allowed to work on Chelsio-specific structures, but not vice
	 * versa.
	 *
	 * So fs can be queried/modified here, BUT rte_flow_query_count
	 * cannot be passed down to the lower layer, since we want to keep
	 * that layer rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret)
		return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
					  f, "cxgbe pmd failed to"
					  " perform query");

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;

	return 0; /* success / partial_success */
}

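/*
 * rte_flow "validate" callback: run the full parse and filter
 * validation on a temporary rte_flow object without touching the
 * hardware, so the application can probe whether a rule would be
 * accepted.
 */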
static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret = 0;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	if (cxgbe_get_fidx(flow, &fidx)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "no memory in tcam.");
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "validation failed");
	}

	t4_os_free(flow);
	return 0;
}

/**
 * @ret : == 0 filter destroyed successfully
 *        < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
		      struct rte_flow_error *e)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	    f->private) /* We (rte_flow) created this filter */
		return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
					  e);
	return 1;
}

static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev, e);
			if (ret < 0)
				goto out;
		}
	}

out:
	return ret >= 0 ? 0 : ret;
}

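/* rte_flow operations exported through the generic ethdev filter API. */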
static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate = cxgbe_flow_validate,
	.create = cxgbe_flow_create,
	.destroy = cxgbe_flow_destroy,
	.flush = cxgbe_flow_flush,
	.query = cxgbe_flow_query,
};

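/*
 * ethdev filter_ctrl entry point: hand out the rte_flow ops table when
 * the generic filter type is queried.
 */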
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;