/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>

#include "enic_compat.h"

#define FLOW_TRACE() \
	rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
		"%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
		fmt, ##args)
/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 */
struct copy_item_args {
	const struct rte_flow_item *item;
	struct filter_v2 *filter;
	uint8_t *inner_ofst;
};
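/*
 * Walkthrough (derived from the copy functions below): for the pattern
 * "eth / ipv4 / udp / vxlan / eth", the outer ETH/IPV4/UDP items are
 * copied with *inner_ofst == 0. The VXLAN item then sets *inner_ofst to
 * sizeof(struct vxlan_hdr), so the inner ETH item is copied into the L5
 * key at that offset and advances *inner_ofst past itself.
 */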
/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);

/** Info about how to copy items into enic filters. */
struct enic_items {
	/** Function for copying and validating an item. */
	enic_copy_item_fn *copy_item;
	/** List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const u8 valid_start_item;
};
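/*
 * For example, in enic_items_v1 below IPV4 has valid_start_item == 1 but
 * UDP and TCP do not, so a pattern on those NICs must begin at layer 3.
 */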
/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** List of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
	/* Max type in the above list, used to detect unsupported types */
	enum rte_flow_item_type max_item_type;
};
/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
			     const struct rte_flow_action actions[],
			     struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** list of valid actions */
	const enum rte_flow_action_type *actions;
	/** copy function for a particular NIC */
	copy_action_fn *copy_fn;
};
/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;
/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * filtering is supported.
 */
static const struct enic_items enic_items_v1[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v1,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
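/*
 * Illustrative only: a 5-tuple flow these legacy NICs accept, in testpmd
 * syntax (port number and values are hypothetical):
 *   flow create 0 ingress pattern ipv4 src is 10.0.0.1 dst is 10.0.0.2 /
 *     udp src is 100 dst is 200 / end actions queue index 1 / end
 * Both addresses and both ports must be non-zero; the v1 copy functions
 * below reject specs missing any of them.
 */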
/**
 * NICs have Advanced Filters capability but they are disabled. This means
 * that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_IPV4_5TUPLE] = {
		.item_info = enic_items_v1,
		.max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
	},
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[FILTER_DPDK_1] = {
		.item_info = enic_items_v3,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
};
/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	},
	[FILTER_ACTION_FILTER_ID_FLAG] = {
		.actions = enic_supported_actions_v2_id,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_DROP_FLAG] = {
		.actions = enic_supported_actions_v2_drop,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_COUNTER_FLAG] = {
		.actions = enic_supported_actions_v2_count,
		.copy_fn = enic_copy_action_v2,
	},
};
static int
mask_exact_match(const u8 *supported, const u8 *supplied,
		 unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		if (supported[i] != supplied[i])
			return 0;
	}
	return 1;
}
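/*
 * Exact match means the supplied mask must equal the capability
 * byte-for-byte: e.g. for the v1 IPv4 item below, src/dst masks of
 * 0xffffffff with every other header field zeroed pass; any partial
 * prefix mask fails.
 */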
static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct ipv4_hdr supported_mask = {
		.src_addr = 0xffffffff,
		.dst_addr = 0xffffffff,
	};

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	/* This is an exact match filter, both fields must be set */
	if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
		FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches the capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "IPv4 exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_addr = spec->hdr.src_addr;
	enic_5tup->dst_addr = spec->hdr.dst_addr;

	return 0;
}
static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct udp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "UDP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches the capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "UDP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_UDP;

	return 0;
}
static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct tcp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "TCP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches the capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "TCP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_TCP;

	return 0;
}
static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_eth_mask;

	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
	       ETHER_ADDR_LEN);

	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	       ETHER_ADDR_LEN);
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;

	if (*inner_ofst == 0) {
		/* Outer header: use the L2 mask/val fields */
		memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
		       sizeof(struct ether_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
		       sizeof(struct ether_hdr));
	} else {
		if ((*inner_ofst + sizeof(struct ether_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		/* Offset into L5 where inner Ethernet header goes */
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       &enic_mask, sizeof(struct ether_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       &enic_spec, sizeof(struct ether_hdr));
		*inner_ofst += sizeof(struct ether_hdr);
	}
	return 0;
}
static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;

	if (*inner_ofst == 0) {
		struct ether_hdr *eth_mask =
			(void *)gp->layer[FILTER_GENERIC_1_L2].mask;
		struct ether_hdr *eth_val =
			(void *)gp->layer[FILTER_GENERIC_1_L2].val;

		/* Outer TPID cannot be matched */
		if (eth_mask->ether_type)
			return ENOTSUP;
		eth_mask->ether_type = mask->inner_type;
		eth_val->ether_type = spec->inner_type;

		/* Outer header. Use the vlan mask/val fields */
		gp->mask_vlan = mask->tci;
		gp->val_vlan = spec->tci;
	} else {
		/* Inner header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct vlan_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct vlan_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct vlan_hdr));
		*inner_ofst += sizeof(struct vlan_hdr);
	}
	return 0;
}
static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	if (*inner_ofst == 0) {
		/* Match IPv4 */
		gp->mask_flags |= FILTER_GENERIC_1_IPV4;
		gp->val_flags |= FILTER_GENERIC_1_IPV4;

		/* Match all if no spec */
		if (!spec)
			return 0;

		if (!mask)
			mask = &rte_flow_item_ipv4_mask;

		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv4_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv4_hdr));
	} else {
		/* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv4_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv4_hdr));
		*inner_ofst += sizeof(struct ipv4_hdr);
	}
	return 0;
}
static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match IPv6 */
	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv6_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv6_hdr));
	} else {
		/* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv6_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv6_hdr));
		*inner_ofst += sizeof(struct ipv6_hdr);
	}
	return 0;
}
static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match UDP */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct udp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct udp_hdr));
	} else {
		/* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct udp_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct udp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct udp_hdr));
		*inner_ofst += sizeof(struct udp_hdr);
	}
	return 0;
}
static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match TCP */
	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		return ENOTSUP;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct tcp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct tcp_hdr));
	} else {
		/* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct tcp_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct tcp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct tcp_hdr));
		*inner_ofst += sizeof(struct tcp_hdr);
	}
	return 0;
}
static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;

	FLOW_TRACE();

	if (*inner_ofst)
		return ENOTSUP;

	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly set
	 * the protocol number in the IP pattern.
	 */
	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
		struct ipv4_hdr *ip;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->next_proto_id;
	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
		struct ipv6_hdr *ip;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->proto;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->proto;
	} else {
		/* Need IPv4/IPv6 pattern first */
		return EINVAL;
	}
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
	return 0;
}
static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	if (*inner_ofst)
		return EINVAL;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));

	*inner_ofst = sizeof(struct vxlan_hdr);
	return 0;
}
/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
		    const struct enic_items *item_info, u8 is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	FLOW_TRACE();

	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}

	/* This is the first item in the stack. Check if that's cool */
	if (is_first_item && item_info->valid_start_item)
		return 1;

	return 0;
}
/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_filter_cap *cap,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;
	struct copy_item_args args;
	u8 is_first_item = 1;

	FLOW_TRACE();

	prev_item = 0;

	args.filter = enic_filter;
	args.inner_ofst = &inner_ofst;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If NULL
		 * is returned the NIC does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &cap->item_info[item->type];
		if (item->type > cap->max_item_type ||
		    item_info->copy_item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   NULL, "Unsupported item.");
			return -rte_errno;
		}

		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))
			goto stacking_error;

		args.item = item;
		ret = item_info->copy_item(&args);
		if (ret)
			goto item_not_supported;
		prev_item = item->type;
		is_first_item = 0;
	}
	return 0;

item_not_supported:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");
	return -rte_errno;

stacking_error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
	return -rte_errno;
}
/**
 * Build the internal version 1 NIC action structure from the provided pattern.
 * The pattern is validated as the items are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, };
	uint32_t overlap = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			break;
		}
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_RQ_STEERING;
	return 0;
}
/**
 * Build the internal version 2 NIC action structure from the provided pattern.
 * The pattern is validated as the items are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v2(struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, MARK = 2, };
	uint32_t overlap = 0;
	bool passthru = false;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/*
			 * Map mark ID (32-bit) to filter ID (16-bit):
			 * - Reject values > 16 bits
			 * - Filter ID 0 is reserved for filters that steer
			 *   but not mark. So add 1 to the mark ID to avoid
			 *   it.
			 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
			 *   reserved for the "flag" action below.
			 */
			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
				return EINVAL;
			enic_action->filter_id = mark->id + 1;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
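		/*
		 * Example of the mapping above: mark id 5 is programmed as
		 * filter_id 6; ids 0xfffe and above are rejected because
		 * they would collide with ENIC_MAGIC_FILTER_ID (0xffff),
		 * which the FLAG action below uses.
		 */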
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_DROP: {
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
				actions->conf;
			bool allow;
			uint16_t i;

			/*
			 * Hardware does not support general RSS actions, but
			 * we can still support the dummy one that is used to
			 * "receive normally".
			 */
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->level == 0 &&
				(rss->types == 0 ||
				 rss->types == enic->rss_hf) &&
				rss->queue_num == enic->rq_count &&
				rss->key_len == 0;
			/* Identity queue map is ok */
			for (i = 0; i < rss->queue_num; i++)
				allow = allow && (i == rss->queue[i]);
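			/*
			 * e.g. with enic->rq_count == 4, only queues
			 * [0, 1, 2, 3] in that order, the default hash
			 * function, no key, and level 0 pass this check.
			 */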
			if (!allow)
				return ENOTSUP;
			if (overlap & FATE)
				return ENOTSUP;
			/* Need MARK or FLAG */
			if (!(overlap & MARK))
				return ENOTSUP;
			overlap |= FATE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			/*
			 * Like RSS above, PASSTHRU + MARK may be used to
			 * "mark and then receive normally". MARK usually comes
			 * after PASSTHRU, so remember we have seen passthru
			 * and check for mark later.
			 */
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			passthru = true;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	/* Only PASSTHRU + MARK is allowed */
	if (passthru && !(overlap & MARK))
		return ENOTSUP;
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_V2;
	return 0;
}
/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
	if (enic->flow_filter_mode)
		return &enic_filter_cap[enic->flow_filter_mode];

	return NULL;
}
/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
	const struct enic_action_cap *ea;
	uint8_t actions;

	actions = enic->filter_actions;
	if (actions & FILTER_ACTION_COUNTER_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
	else if (actions & FILTER_ACTION_DROP_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
	else
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
	return ea;
}
/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
	if (ea->type == FILTER_ACTION_RQ_STEERING) {
		FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
	} else if (ea->type == FILTER_ACTION_V2) {
		FLOW_LOG(INFO, "Actions(V2)\n");
		if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
			FLOW_LOG(INFO, "\tqueue: %u\n",
				 enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
		if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
			FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
	}
}
/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
	const struct filter_generic_1 *gp;
	int i, j, mbyte;
	char buf[128], *bp;
	char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
	char l4csum[16], ipfrag[16];

	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
		break;
	case FILTER_USNIC_IP:
	case FILTER_DPDK_1:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
			 gp->val_vlan, gp->mask_vlan);

		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
			sprintf(ip4, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV4)
				? "ip4(y)" : "ip4(n)");
		else
			sprintf(ip4, "%s ", "ip4(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
			sprintf(ip6, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV6)
				? "ip6(y)" : "ip6(n)");
		else
			sprintf(ip6, "%s ", "ip6(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
			sprintf(udp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_UDP)
				? "udp(y)" : "udp(n)");
		else
			sprintf(udp, "%s ", "udp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
			sprintf(tcp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP)
				? "tcp(y)" : "tcp(n)");
		else
			sprintf(tcp, "%s ", "tcp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				? "tcpudp(y)" : "tcpudp(n)");
		else
			sprintf(tcpudp, "%s ", "tcpudp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				? "ip4csum(y)" : "ip4csum(n)");
		else
			sprintf(ip4csum, "%s ", "ip4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				? "l4csum(y)" : "l4csum(n)");
		else
			sprintf(l4csum, "%s ", "l4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				? "ipfrag(y)" : "ipfrag(n)");
		else
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);

		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
				mbyte--;
			if (mbyte == 0)
				continue;

			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].mask[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].val[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
		}
		break;
	default:
		FLOW_LOG(INFO, "FILTER UNKNOWN\n");
		break;
	}
}
/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
	enic_dump_filter(filt);
	enic_dump_actions(ea);
}
/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	unsigned int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	FLOW_TRACE();

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify Actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(enic, actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify Flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported.
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap,
			       enic_filter, error);
	return ret;
}
/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		     struct filter_action_v2 *enic_action,
		     struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	uint16_t entry;
	int ctr_idx;
	int last_max_flow_ctr;

	FLOW_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	flow->counter_idx = -1;
	last_max_flow_ctr = -1;
	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "cannot allocate counter");
			goto unwind_flow_alloc;
		}
		flow->counter_idx = ctr_idx;
		enic_action->counter_index = ctr_idx;

		/* If index is the largest, increase the counter DMA size */
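		/*
		 * e.g. growing from max index 3 to index 7 reconfigures the
		 * DMA area for ctr_idx + 1 == 8 counters; the unwind path
		 * below shrinks it back if the classifier add fails.
		 */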
		if (ctr_idx > enic->max_flow_counter) {
			err = vnic_dev_counter_dma_cfg(enic->vdev,
						VNIC_FLOW_COUNTER_UPDATE_MSECS,
						ctr_idx + 1);
			if (err) {
				rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					NULL, "counter DMA config failed");
				goto unwind_ctr_alloc;
			}
			last_max_flow_ctr = enic->max_flow_counter;
			enic->max_flow_counter = ctr_idx;
		}
	}

	/* entry[in] is the queue id, entry[out] is the filter Id for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		goto unwind_ctr_dma_cfg;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;

	return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
	if (last_max_flow_ctr != -1) {
		/* reduce counter DMA size */
		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
		enic->max_flow_counter = last_max_flow_ctr;
	}
unwind_ctr_alloc:
	if (flow->counter_idx != -1)
		vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
	rte_free(flow);
	return NULL;
}
/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param filter_id[in]
 *   Id of NIC filter.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	u16 filter_id;
	int err;

	FLOW_TRACE();

	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}

	if (flow->counter_idx != -1) {
		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
			dev_err(enic, "counter free failed, idx: %d\n",
				flow->counter_idx);
		flow->counter_idx = -1;
	}
	return 0;
}
/*
 * The following functions are callbacks for Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}
/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
			      &enic_action);
	if (ret < 0)
		return NULL;

	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);

	return flow;
}
/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  __rte_unused struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
	rte_free(flow);
	return 0;
}
/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}
static int
enic_flow_query_count(struct rte_eth_dev *dev,
		      struct rte_flow *flow, void *data,
		      struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);
	struct rte_flow_query_count *query;
	uint64_t packets, bytes;

	FLOW_TRACE();

	if (flow->counter_idx == -1) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "flow does not have counter");
	}
	query = (struct rte_flow_query_count *)data;
	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
				    !!query->reset, &packets, &bytes)) {
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL,
			 "cannot read counter");
	}
	query->hits_set = 1;
	query->bytes_set = 1;
	query->hits = packets;
	query->bytes = bytes;
	return 0;
}
static int
enic_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
	.query = enic_flow_query,
};