1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
8 #include <rte_ethdev_driver.h>
9 #include <rte_flow_driver.h>
10 #include <rte_ether.h>
14 #include "enic_compat.h"
19 #define FLOW_TRACE() \
20 rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
22 #define FLOW_LOG(level, fmt, args...) \
23 rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
26 /** Info about how to copy items into enic filters. */
28 /** Function for copying and validating an item. */
29 int (*copy_item)(const struct rte_flow_item *item,
30 struct filter_v2 *enic_filter, u8 *inner_ofst);
31 /** List of valid previous items. */
32 const enum rte_flow_item_type * const prev_items;
33 /** True if it's OK for this item to be the first item. For some NIC
34 * versions, it's invalid to start the stack above layer 3.
36 const u8 valid_start_item;
39 /** Filtering capabilities for various NIC and firmware versions. */
40 struct enic_filter_cap {
41 /** list of valid items and their handlers and attributes. */
42 const struct enic_items *item_info;
45 /* functions for copying flow actions into enic actions */
46 typedef int (copy_action_fn)(const struct rte_flow_action actions[],
47 struct filter_action_v2 *enic_action);
49 /* functions for copying items into enic filters */
50 typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
51 struct filter_v2 *enic_filter, u8 *inner_ofst);
53 /** Action capabilities for various NICs. */
54 struct enic_action_cap {
55 /** list of valid actions */
56 const enum rte_flow_action_type *actions;
57 /** copy function for a particular NIC */
58 int (*copy_fn)(const struct rte_flow_action actions[],
59 struct filter_action_v2 *enic_action);
62 /* Forward declarations */
63 static enic_copy_item_fn enic_copy_item_ipv4_v1;
64 static enic_copy_item_fn enic_copy_item_udp_v1;
65 static enic_copy_item_fn enic_copy_item_tcp_v1;
66 static enic_copy_item_fn enic_copy_item_eth_v2;
67 static enic_copy_item_fn enic_copy_item_vlan_v2;
68 static enic_copy_item_fn enic_copy_item_ipv4_v2;
69 static enic_copy_item_fn enic_copy_item_ipv6_v2;
70 static enic_copy_item_fn enic_copy_item_udp_v2;
71 static enic_copy_item_fn enic_copy_item_tcp_v2;
72 static enic_copy_item_fn enic_copy_item_sctp_v2;
74 static enic_copy_item_fn enic_copy_item_vxlan_v2;
75 static copy_action_fn enic_copy_action_v1;
76 static copy_action_fn enic_copy_action_v2;
79 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
82 static const struct enic_items enic_items_v1[] = {
/* IPv4 is the only item allowed to start a v1 pattern (prev list is END). */
83 [RTE_FLOW_ITEM_TYPE_IPV4] = {
84 .copy_item = enic_copy_item_ipv4_v1,
85 .valid_start_item = 1,
86 .prev_items = (const enum rte_flow_item_type[]) {
87 RTE_FLOW_ITEM_TYPE_END,
/* UDP cannot start a pattern and must directly follow IPv4. */
90 [RTE_FLOW_ITEM_TYPE_UDP] = {
91 .copy_item = enic_copy_item_udp_v1,
92 .valid_start_item = 0,
93 .prev_items = (const enum rte_flow_item_type[]) {
94 RTE_FLOW_ITEM_TYPE_IPV4,
95 RTE_FLOW_ITEM_TYPE_END,
/* TCP cannot start a pattern and must directly follow IPv4. */
98 [RTE_FLOW_ITEM_TYPE_TCP] = {
99 .copy_item = enic_copy_item_tcp_v1,
100 .valid_start_item = 0,
101 .prev_items = (const enum rte_flow_item_type[]) {
102 RTE_FLOW_ITEM_TYPE_IPV4,
103 RTE_FLOW_ITEM_TYPE_END,
109 * NICs have Advanced Filters capability but they are disabled. This means
110 * that layer 3 must be specified.
112 static const struct enic_items enic_items_v2[] = {
/* L2/L3 items may start a pattern (valid_start_item = 1); L4 and VXLAN
 * items may not and must follow one of the items in their prev_items list.
 */
113 [RTE_FLOW_ITEM_TYPE_ETH] = {
114 .copy_item = enic_copy_item_eth_v2,
115 .valid_start_item = 1,
116 .prev_items = (const enum rte_flow_item_type[]) {
117 RTE_FLOW_ITEM_TYPE_VXLAN,
118 RTE_FLOW_ITEM_TYPE_END,
121 [RTE_FLOW_ITEM_TYPE_VLAN] = {
122 .copy_item = enic_copy_item_vlan_v2,
123 .valid_start_item = 1,
124 .prev_items = (const enum rte_flow_item_type[]) {
125 RTE_FLOW_ITEM_TYPE_ETH,
126 RTE_FLOW_ITEM_TYPE_END,
129 [RTE_FLOW_ITEM_TYPE_IPV4] = {
130 .copy_item = enic_copy_item_ipv4_v2,
131 .valid_start_item = 1,
132 .prev_items = (const enum rte_flow_item_type[]) {
133 RTE_FLOW_ITEM_TYPE_ETH,
134 RTE_FLOW_ITEM_TYPE_VLAN,
135 RTE_FLOW_ITEM_TYPE_END,
138 [RTE_FLOW_ITEM_TYPE_IPV6] = {
139 .copy_item = enic_copy_item_ipv6_v2,
140 .valid_start_item = 1,
141 .prev_items = (const enum rte_flow_item_type[]) {
142 RTE_FLOW_ITEM_TYPE_ETH,
143 RTE_FLOW_ITEM_TYPE_VLAN,
144 RTE_FLOW_ITEM_TYPE_END,
147 [RTE_FLOW_ITEM_TYPE_UDP] = {
148 .copy_item = enic_copy_item_udp_v2,
149 .valid_start_item = 0,
150 .prev_items = (const enum rte_flow_item_type[]) {
151 RTE_FLOW_ITEM_TYPE_IPV4,
152 RTE_FLOW_ITEM_TYPE_IPV6,
153 RTE_FLOW_ITEM_TYPE_END,
156 [RTE_FLOW_ITEM_TYPE_TCP] = {
157 .copy_item = enic_copy_item_tcp_v2,
158 .valid_start_item = 0,
159 .prev_items = (const enum rte_flow_item_type[]) {
160 RTE_FLOW_ITEM_TYPE_IPV4,
161 RTE_FLOW_ITEM_TYPE_IPV6,
162 RTE_FLOW_ITEM_TYPE_END,
165 [RTE_FLOW_ITEM_TYPE_SCTP] = {
166 .copy_item = enic_copy_item_sctp_v2,
167 .valid_start_item = 0,
168 .prev_items = (const enum rte_flow_item_type[]) {
169 RTE_FLOW_ITEM_TYPE_IPV4,
170 RTE_FLOW_ITEM_TYPE_IPV6,
171 RTE_FLOW_ITEM_TYPE_END,
174 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
175 .copy_item = enic_copy_item_vxlan_v2,
176 .valid_start_item = 0,
177 .prev_items = (const enum rte_flow_item_type[]) {
178 RTE_FLOW_ITEM_TYPE_UDP,
179 RTE_FLOW_ITEM_TYPE_END,
184 /** NICs with Advanced filters enabled */
185 static const struct enic_items enic_items_v3[] = {
/* With Advanced Filters enabled every supported item may start a pattern
 * (valid_start_item = 1 throughout); prev_items still constrains ordering
 * when an item does follow another.
 */
186 [RTE_FLOW_ITEM_TYPE_ETH] = {
187 .copy_item = enic_copy_item_eth_v2,
188 .valid_start_item = 1,
189 .prev_items = (const enum rte_flow_item_type[]) {
190 RTE_FLOW_ITEM_TYPE_VXLAN,
191 RTE_FLOW_ITEM_TYPE_END,
194 [RTE_FLOW_ITEM_TYPE_VLAN] = {
195 .copy_item = enic_copy_item_vlan_v2,
196 .valid_start_item = 1,
197 .prev_items = (const enum rte_flow_item_type[]) {
198 RTE_FLOW_ITEM_TYPE_ETH,
199 RTE_FLOW_ITEM_TYPE_END,
202 [RTE_FLOW_ITEM_TYPE_IPV4] = {
203 .copy_item = enic_copy_item_ipv4_v2,
204 .valid_start_item = 1,
205 .prev_items = (const enum rte_flow_item_type[]) {
206 RTE_FLOW_ITEM_TYPE_ETH,
207 RTE_FLOW_ITEM_TYPE_VLAN,
208 RTE_FLOW_ITEM_TYPE_END,
211 [RTE_FLOW_ITEM_TYPE_IPV6] = {
212 .copy_item = enic_copy_item_ipv6_v2,
213 .valid_start_item = 1,
214 .prev_items = (const enum rte_flow_item_type[]) {
215 RTE_FLOW_ITEM_TYPE_ETH,
216 RTE_FLOW_ITEM_TYPE_VLAN,
217 RTE_FLOW_ITEM_TYPE_END,
220 [RTE_FLOW_ITEM_TYPE_UDP] = {
221 .copy_item = enic_copy_item_udp_v2,
222 .valid_start_item = 1,
223 .prev_items = (const enum rte_flow_item_type[]) {
224 RTE_FLOW_ITEM_TYPE_IPV4,
225 RTE_FLOW_ITEM_TYPE_IPV6,
226 RTE_FLOW_ITEM_TYPE_END,
229 [RTE_FLOW_ITEM_TYPE_TCP] = {
230 .copy_item = enic_copy_item_tcp_v2,
231 .valid_start_item = 1,
232 .prev_items = (const enum rte_flow_item_type[]) {
233 RTE_FLOW_ITEM_TYPE_IPV4,
234 RTE_FLOW_ITEM_TYPE_IPV6,
235 RTE_FLOW_ITEM_TYPE_END,
238 [RTE_FLOW_ITEM_TYPE_SCTP] = {
239 .copy_item = enic_copy_item_sctp_v2,
240 .valid_start_item = 1,
241 .prev_items = (const enum rte_flow_item_type[]) {
242 RTE_FLOW_ITEM_TYPE_IPV4,
243 RTE_FLOW_ITEM_TYPE_IPV6,
244 RTE_FLOW_ITEM_TYPE_END,
247 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
248 .copy_item = enic_copy_item_vxlan_v2,
249 .valid_start_item = 1,
250 .prev_items = (const enum rte_flow_item_type[]) {
251 RTE_FLOW_ITEM_TYPE_UDP,
252 RTE_FLOW_ITEM_TYPE_END,
257 /** Filtering capabilities indexed by the NIC's supported filter type. */
258 static const struct enic_filter_cap enic_filter_cap[] = {
259 [FILTER_IPV4_5TUPLE] = {
260 .item_info = enic_items_v1,
262 [FILTER_USNIC_IP] = {
263 .item_info = enic_items_v2,
/* NOTE(review): the designated index of this last entry is not visible in
 * this view — presumably the advanced-filter type; verify against the full
 * file.
 */
266 .item_info = enic_items_v3,
270 /** Supported actions for older NICs */
271 static const enum rte_flow_action_type enic_supported_actions_v1[] = {
272 RTE_FLOW_ACTION_TYPE_QUEUE,
273 RTE_FLOW_ACTION_TYPE_END,
276 /** Supported actions for newer NICs */
277 static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
278 RTE_FLOW_ACTION_TYPE_QUEUE,
279 RTE_FLOW_ACTION_TYPE_MARK,
280 RTE_FLOW_ACTION_TYPE_FLAG,
281 RTE_FLOW_ACTION_TYPE_END,
/** Supported actions for newer NICs that can also drop packets */
284 static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
285 RTE_FLOW_ACTION_TYPE_QUEUE,
286 RTE_FLOW_ACTION_TYPE_MARK,
287 RTE_FLOW_ACTION_TYPE_FLAG,
288 RTE_FLOW_ACTION_TYPE_DROP,
289 RTE_FLOW_ACTION_TYPE_END,
292 /** Action capabilities indexed by NIC version information */
293 static const struct enic_action_cap enic_action_cap[] = {
294 [FILTER_ACTION_RQ_STEERING_FLAG] = {
295 .actions = enic_supported_actions_v1,
296 .copy_fn = enic_copy_action_v1,
298 [FILTER_ACTION_FILTER_ID_FLAG] = {
299 .actions = enic_supported_actions_v2_id,
300 .copy_fn = enic_copy_action_v2,
/* DROP-capable NICs use the same copy function as FILTER_ID NICs; only
 * the supported-action list differs.
 */
302 [FILTER_ACTION_DROP_FLAG] = {
303 .actions = enic_supported_actions_v2_drop,
304 .copy_fn = enic_copy_action_v2,
309 mask_exact_match(const u8 *supported, const u8 *supplied,
313 for (i = 0; i < size; i++) {
314 if (supported[i] != supplied[i])
321 * Copy IPv4 item into version 1 NIC filter.
324 * Item specification.
325 * @param enic_filter[out]
326 * Partially filled in NIC filter structure.
327 * @param inner_ofst[in]
328 * Should always be 0 for version 1.
331 enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
332 struct filter_v2 *enic_filter, u8 *inner_ofst)
334 const struct rte_flow_item_ipv4 *spec = item->spec;
335 const struct rte_flow_item_ipv4 *mask = item->mask;
336 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
/* v1 filters can only exact-match both src and dst addresses in full. */
337 struct ipv4_hdr supported_mask = {
338 .src_addr = 0xffffffff,
339 .dst_addr = 0xffffffff,
/* Default mask applies when the item supplies none. */
348 mask = &rte_flow_item_ipv4_mask;
350 /* This is an exact match filter, both fields must be set */
351 if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
352 FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
356 /* check that the supplied mask exactly matches capability */
357 if (!mask_exact_match((const u8 *)&supported_mask,
358 (const u8 *)item->mask, sizeof(*mask))) {
359 FLOW_LOG(ERR, "IPv4 exact match mask");
/* Copy the addresses into the 5-tuple filter as-is (network order). */
363 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
364 enic_5tup->src_addr = spec->hdr.src_addr;
365 enic_5tup->dst_addr = spec->hdr.dst_addr;
371 * Copy UDP item into version 1 NIC filter.
374 * Item specification.
375 * @param enic_filter[out]
376 * Partially filled in NIC filter structure.
377 * @param inner_ofst[in]
378 * Should always be 0 for version 1.
381 enic_copy_item_udp_v1(const struct rte_flow_item *item,
382 struct filter_v2 *enic_filter, u8 *inner_ofst)
384 const struct rte_flow_item_udp *spec = item->spec;
385 const struct rte_flow_item_udp *mask = item->mask;
386 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
387 struct udp_hdr supported_mask = {
/* Default mask applies when the item supplies none. */
398 mask = &rte_flow_item_udp_mask;
400 /* This is an exact match filter, both ports must be set */
401 if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
/* NOTE(review): message says "addr" but this check is about ports. */
402 FLOW_LOG(ERR, "UDP exact match src/dst addr");
406 /* check that the supplied mask exactly matches capability */
407 if (!mask_exact_match((const u8 *)&supported_mask,
408 (const u8 *)item->mask, sizeof(*mask))) {
409 FLOW_LOG(ERR, "UDP exact match mask");
413 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
414 enic_5tup->src_port = spec->hdr.src_port;
415 enic_5tup->dst_port = spec->hdr.dst_port;
416 enic_5tup->protocol = PROTO_UDP;
422 * Copy TCP item into version 1 NIC filter.
425 * Item specification.
426 * @param enic_filter[out]
427 * Partially filled in NIC filter structure.
428 * @param inner_ofst[in]
429 * Should always be 0 for version 1.
432 enic_copy_item_tcp_v1(const struct rte_flow_item *item,
433 struct filter_v2 *enic_filter, u8 *inner_ofst)
435 const struct rte_flow_item_tcp *spec = item->spec;
436 const struct rte_flow_item_tcp *mask = item->mask;
437 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
438 struct tcp_hdr supported_mask = {
/* Default mask applies when the item supplies none. */
449 mask = &rte_flow_item_tcp_mask;
451 /* This is an exact match filter, both ports must be set */
452 if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
/* NOTE(review): message says "addr" but this check is about ports. */
453 FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr");
457 /* check that the supplied mask exactly matches capability */
458 if (!mask_exact_match((const u8 *)&supported_mask,
459 (const u8 *)item->mask, sizeof(*mask))) {
460 FLOW_LOG(ERR, "TCP exact match mask");
464 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
465 enic_5tup->src_port = spec->hdr.src_port;
466 enic_5tup->dst_port = spec->hdr.dst_port;
467 enic_5tup->protocol = PROTO_TCP;
473 * Copy ETH item into version 2 NIC filter.
476 * Item specification.
477 * @param enic_filter[out]
478 * Partially filled in NIC filter structure.
479 * @param inner_ofst[in]
480 * If zero, this is an outer header. If non-zero, this is the offset into L5
481 * where the header begins.
484 enic_copy_item_eth_v2(const struct rte_flow_item *item,
485 struct filter_v2 *enic_filter, u8 *inner_ofst)
487 struct ether_hdr enic_spec;
488 struct ether_hdr enic_mask;
489 const struct rte_flow_item_eth *spec = item->spec;
490 const struct rte_flow_item_eth *mask = item->mask;
491 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
495 /* Match all if no spec */
/* Default mask applies when the item supplies none. */
500 mask = &rte_flow_item_eth_mask;
/* Re-pack rte_flow's eth item layout into the on-wire ether_hdr layout. */
502 memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
504 memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
507 memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
509 memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
511 enic_spec.ether_type = spec->type;
512 enic_mask.ether_type = mask->type;
/* Outer header goes into the L2 layer; inner (post-VXLAN) header goes
 * into L5 at *inner_ofst.
 */
514 if (*inner_ofst == 0) {
516 memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
517 sizeof(struct ether_hdr));
518 memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
519 sizeof(struct ether_hdr));
/* Reject inner headers that would overflow the L5 key buffer. */
522 if ((*inner_ofst + sizeof(struct ether_hdr)) >
523 FILTER_GENERIC_1_KEY_LEN)
525 /* Offset into L5 where inner Ethernet header goes */
526 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
527 &enic_mask, sizeof(struct ether_hdr));
528 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
529 &enic_spec, sizeof(struct ether_hdr));
530 *inner_ofst += sizeof(struct ether_hdr);
536 * Copy VLAN item into version 2 NIC filter.
539 * Item specification.
540 * @param enic_filter[out]
541 * Partially filled in NIC filter structure.
542 * @param inner_ofst[in]
543 * If zero, this is an outer header. If non-zero, this is the offset into L5
544 * where the header begins.
547 enic_copy_item_vlan_v2(const struct rte_flow_item *item,
548 struct filter_v2 *enic_filter, u8 *inner_ofst)
550 const struct rte_flow_item_vlan *spec = item->spec;
551 const struct rte_flow_item_vlan *mask = item->mask;
552 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
556 /* Match all if no spec */
560 /* Don't support filtering in tpid */
/* Default mask applies when the item supplies none; its tpid must be 0. */
565 mask = &rte_flow_item_vlan_mask;
566 RTE_ASSERT(mask->tpid == 0);
569 if (*inner_ofst == 0) {
570 /* Outer header. Use the vlan mask/val fields */
571 gp->mask_vlan = mask->tci;
572 gp->val_vlan = spec->tci;
574 /* Inner header. Mask/Val start at *inner_ofst into L5 */
575 if ((*inner_ofst + sizeof(struct vlan_hdr)) >
576 FILTER_GENERIC_1_KEY_LEN)
578 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
579 mask, sizeof(struct vlan_hdr));
580 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
581 spec, sizeof(struct vlan_hdr));
582 *inner_ofst += sizeof(struct vlan_hdr);
588 * Copy IPv4 item into version 2 NIC filter.
591 * Item specification.
592 * @param enic_filter[out]
593 * Partially filled in NIC filter structure.
594 * @param inner_ofst[in]
595 * Must be 0. Don't support inner IPv4 filtering.
598 enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
599 struct filter_v2 *enic_filter, u8 *inner_ofst)
601 const struct rte_flow_item_ipv4 *spec = item->spec;
602 const struct rte_flow_item_ipv4 *mask = item->mask;
603 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
/* Outer header: flag the filter as IPv4 in both mask and value. */
607 if (*inner_ofst == 0) {
609 gp->mask_flags |= FILTER_GENERIC_1_IPV4;
610 gp->val_flags |= FILTER_GENERIC_1_IPV4;
612 /* Match all if no spec */
/* Default mask applies when the item supplies none. */
617 mask = &rte_flow_item_ipv4_mask;
619 memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
620 sizeof(struct ipv4_hdr));
621 memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
622 sizeof(struct ipv4_hdr));
624 /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
625 if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
626 FILTER_GENERIC_1_KEY_LEN)
628 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
629 mask, sizeof(struct ipv4_hdr));
630 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
631 spec, sizeof(struct ipv4_hdr));
632 *inner_ofst += sizeof(struct ipv4_hdr);
638 * Copy IPv6 item into version 2 NIC filter.
641 * Item specification.
642 * @param enic_filter[out]
643 * Partially filled in NIC filter structure.
644 * @param inner_ofst[in]
645 * Must be 0. Don't support inner IPv6 filtering.
648 enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
649 struct filter_v2 *enic_filter, u8 *inner_ofst)
651 const struct rte_flow_item_ipv6 *spec = item->spec;
652 const struct rte_flow_item_ipv6 *mask = item->mask;
653 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
/* Flag the filter as IPv6 in both mask and value. */
658 gp->mask_flags |= FILTER_GENERIC_1_IPV6;
659 gp->val_flags |= FILTER_GENERIC_1_IPV6;
661 /* Match all if no spec */
/* Default mask applies when the item supplies none. */
666 mask = &rte_flow_item_ipv6_mask;
668 if (*inner_ofst == 0) {
669 memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
670 sizeof(struct ipv6_hdr));
671 memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
672 sizeof(struct ipv6_hdr));
674 /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
675 if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
676 FILTER_GENERIC_1_KEY_LEN)
678 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
679 mask, sizeof(struct ipv6_hdr));
680 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
681 spec, sizeof(struct ipv6_hdr));
682 *inner_ofst += sizeof(struct ipv6_hdr);
688 * Copy UDP item into version 2 NIC filter.
691 * Item specification.
692 * @param enic_filter[out]
693 * Partially filled in NIC filter structure.
694 * @param inner_ofst[in]
695 * Must be 0. Don't support inner UDP filtering.
698 enic_copy_item_udp_v2(const struct rte_flow_item *item,
699 struct filter_v2 *enic_filter, u8 *inner_ofst)
701 const struct rte_flow_item_udp *spec = item->spec;
702 const struct rte_flow_item_udp *mask = item->mask;
703 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
/* Flag the filter as UDP in both mask and value. */
708 gp->mask_flags |= FILTER_GENERIC_1_UDP;
709 gp->val_flags |= FILTER_GENERIC_1_UDP;
711 /* Match all if no spec */
/* Default mask applies when the item supplies none. */
716 mask = &rte_flow_item_udp_mask;
718 if (*inner_ofst == 0) {
719 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
720 sizeof(struct udp_hdr));
721 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
722 sizeof(struct udp_hdr));
724 /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
725 if ((*inner_ofst + sizeof(struct udp_hdr)) >
726 FILTER_GENERIC_1_KEY_LEN)
728 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
729 mask, sizeof(struct udp_hdr));
730 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
731 spec, sizeof(struct udp_hdr));
732 *inner_ofst += sizeof(struct udp_hdr);
738 * Copy TCP item into version 2 NIC filter.
741 * Item specification.
742 * @param enic_filter[out]
743 * Partially filled in NIC filter structure.
744 * @param inner_ofst[in]
745 * Must be 0. Don't support inner TCP filtering.
748 enic_copy_item_tcp_v2(const struct rte_flow_item *item,
749 struct filter_v2 *enic_filter, u8 *inner_ofst)
751 const struct rte_flow_item_tcp *spec = item->spec;
752 const struct rte_flow_item_tcp *mask = item->mask;
753 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
/* Flag the filter as TCP in both mask and value. */
758 gp->mask_flags |= FILTER_GENERIC_1_TCP;
759 gp->val_flags |= FILTER_GENERIC_1_TCP;
761 /* Match all if no spec */
768 if (*inner_ofst == 0) {
769 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
770 sizeof(struct tcp_hdr));
771 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
772 sizeof(struct tcp_hdr));
774 /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
775 if ((*inner_ofst + sizeof(struct tcp_hdr)) >
776 FILTER_GENERIC_1_KEY_LEN)
778 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
779 mask, sizeof(struct tcp_hdr));
780 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
781 spec, sizeof(struct tcp_hdr));
782 *inner_ofst += sizeof(struct tcp_hdr);
788 * Copy SCTP item into version 2 NIC filter.
791 * Item specification.
792 * @param enic_filter[out]
793 * Partially filled in NIC filter structure.
794 * @param inner_ofst[in]
795 * Must be 0. Don't support inner SCTP filtering.
798 enic_copy_item_sctp_v2(const struct rte_flow_item *item,
799 struct filter_v2 *enic_filter, u8 *inner_ofst)
801 const struct rte_flow_item_sctp *spec = item->spec;
802 const struct rte_flow_item_sctp *mask = item->mask;
803 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
810 /* Match all if no spec */
/* Default mask applies when the item supplies none. */
815 mask = &rte_flow_item_sctp_mask;
/* SCTP is outer-only: copy the header straight into the L4 layer. */
817 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
818 sizeof(struct sctp_hdr));
819 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
820 sizeof(struct sctp_hdr));
825 * Copy VXLAN item into version 2 NIC filter.
828 * Item specification.
829 * @param enic_filter[out]
830 * Partially filled in NIC filter structure.
831 * @param inner_ofst[in]
832 * Must be 0. VxLAN headers always start at the beginning of L5.
835 enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
836 struct filter_v2 *enic_filter, u8 *inner_ofst)
838 const struct rte_flow_item_vxlan *spec = item->spec;
839 const struct rte_flow_item_vxlan *mask = item->mask;
840 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
847 /* Match all if no spec */
/* Default mask applies when the item supplies none. */
852 mask = &rte_flow_item_vxlan_mask;
854 memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
855 sizeof(struct vxlan_hdr));
856 memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
857 sizeof(struct vxlan_hdr));
/* Subsequent inner items are copied into L5 after the VXLAN header. */
859 *inner_ofst = sizeof(struct vxlan_hdr);
864 * Return 1 if current item is valid on top of the previous one.
866 * @param prev_item[in]
867 * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
869 * @param item_info[in]
870 * Info about this item, like valid previous items.
871 * @param is_first[in]
872 * True if this the first item in the pattern.
875 item_stacking_valid(enum rte_flow_item_type prev_item,
876 const struct enic_items *item_info, u8 is_first_item)
878 enum rte_flow_item_type const *allowed_items = item_info->prev_items;
/* Valid if the previous item appears in this item's allowed-predecessor
 * list (terminated by RTE_FLOW_ITEM_TYPE_END).
 */
882 for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
883 if (prev_item == *allowed_items)
887 /* This is the first item in the stack. Check if that's cool */
888 if (is_first_item && item_info->valid_start_item)
895 * Build the internal enic filter structure from the provided pattern. The
896 * pattern is validated as the items are copied.
899 * @param items_info[in]
900 * Info about this NICs item support, like valid previous items.
901 * @param enic_filter[out]
902 * NIC specific filters derived from the pattern.
906 enic_copy_filter(const struct rte_flow_item pattern[],
907 const struct enic_items *items_info,
908 struct filter_v2 *enic_filter,
909 struct rte_flow_error *error)
912 const struct rte_flow_item *item = pattern;
913 u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
914 enum rte_flow_item_type prev_item;
915 const struct enic_items *item_info;
917 u8 is_first_item = 1;
923 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
924 /* Get info about how to validate and copy the item. If NULL
925 * is returned the nic does not support the item.
927 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
930 item_info = &items_info[item->type];
932 /* check to see if item stacking is valid */
933 if (!item_stacking_valid(prev_item, item_info, is_first_item))
936 ret = item_info->copy_item(item, enic_filter, &inner_ofst);
938 goto item_not_supported;
939 prev_item = item->type;
/* Error labels: copy failure vs. invalid item ordering. */
945 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
946 NULL, "enic type error");
950 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
951 item, "stacking error");
956 * Build the internal version 1 NIC action structure from the provided pattern.
957 * The pattern is validated as the items are copied.
960 * @param enic_action[out]
961 * NIC specific actions derived from the actions.
965 enic_copy_action_v1(const struct rte_flow_action actions[],
966 struct filter_action_v2 *enic_action)
/* "overlap" tracks that exactly one fate-deciding action was given. */
969 uint32_t overlap = 0;
973 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
974 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
977 switch (actions->type) {
978 case RTE_FLOW_ACTION_TYPE_QUEUE: {
979 const struct rte_flow_action_queue *queue =
980 (const struct rte_flow_action_queue *)
/* Translate the rte queue index to the NIC's SOP RQ index. */
986 enic_action->rq_idx =
987 enic_rte_rq_idx_to_sop_idx(queue->index);
/* A fate action (QUEUE) is mandatory for v1. */
995 if (!(overlap & FATE))
997 enic_action->type = FILTER_ACTION_RQ_STEERING;
1002 * Build the internal version 2 NIC action structure from the provided pattern.
1003 * The pattern is validated as the items are copied.
1005 * @param actions[in]
1006 * @param enic_action[out]
1007 * NIC specific actions derived from the actions.
1011 enic_copy_action_v2(const struct rte_flow_action actions[],
1012 struct filter_action_v2 *enic_action)
/* Bits used to reject duplicate fate (QUEUE/DROP) or mark (MARK/FLAG)
 * actions in one flow.
 */
1014 enum { FATE = 1, MARK = 2, };
1015 uint32_t overlap = 0;
1019 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1020 switch (actions->type) {
1021 case RTE_FLOW_ACTION_TYPE_QUEUE: {
1022 const struct rte_flow_action_queue *queue =
1023 (const struct rte_flow_action_queue *)
1029 enic_action->rq_idx =
1030 enic_rte_rq_idx_to_sop_idx(queue->index);
1031 enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
1034 case RTE_FLOW_ACTION_TYPE_MARK: {
1035 const struct rte_flow_action_mark *mark =
1036 (const struct rte_flow_action_mark *)
1042 /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
1043 * in the range of allowed mark ids.
1045 if (mark->id >= ENIC_MAGIC_FILTER_ID)
1047 enic_action->filter_id = mark->id;
1048 enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1051 case RTE_FLOW_ACTION_TYPE_FLAG: {
/* FLAG is implemented as MARK with the reserved magic id. */
1055 enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
1056 enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1059 case RTE_FLOW_ACTION_TYPE_DROP: {
1060 enic_action->flags |= FILTER_ACTION_DROP_FLAG;
1063 case RTE_FLOW_ACTION_TYPE_VOID:
/* Exactly one fate action is required. */
1070 if (!(overlap & FATE))
1072 enic_action->type = FILTER_ACTION_V2;
1076 /** Check if the action is supported; returns non-zero when found in the
 * END-terminated supported_actions list.
 */
1078 enic_match_action(const struct rte_flow_action *action,
1079 const enum rte_flow_action_type *supported_actions)
1081 for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
1082 supported_actions++) {
1083 if (action->type == *supported_actions)
1089 /** Get the NIC filter capabilities structure, or fall through (lines not
 * visible here) when no filter mode is set.
 */
1090 static const struct enic_filter_cap *
1091 enic_get_filter_cap(struct enic *enic)
1093 if (enic->flow_filter_mode)
1094 return &enic_filter_cap[enic->flow_filter_mode];
1099 /** Get the actions for this NIC version. */
1100 static const struct enic_action_cap *
1101 enic_get_action_cap(struct enic *enic)
1103 const struct enic_action_cap *ea;
/* Pick the most capable action set the NIC advertises:
 * DROP > FILTER_ID > plain RQ steering.
 */
1106 actions = enic->filter_actions;
1107 if (actions & FILTER_ACTION_DROP_FLAG)
1108 ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
1109 else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
1110 ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
1112 ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
1116 /* Debug function to dump internal NIC action structure. */
1118 enic_dump_actions(const struct filter_action_v2 *ea)
1120 if (ea->type == FILTER_ACTION_RQ_STEERING) {
1121 FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
1122 } else if (ea->type == FILTER_ACTION_V2) {
1123 FLOW_LOG(INFO, "Actions(V2)\n");
1124 if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
/* Convert SOP RQ index back to the rte queue id for display. */
1125 FLOW_LOG(INFO, "\tqueue: %u\n",
1126 enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
1127 if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
1128 FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
1132 /* Debug function to dump internal NIC filter structure. */
1134 enic_dump_filter(const struct filter_v2 *filt)
1136 const struct filter_generic_1 *gp;
/* One small scratch string per flag; "(y)"/"(n)" = masked with value
 * set/clear, "(x)" = not masked (don't care).
 */
1139 char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1140 char l4csum[16], ipfrag[16];
1142 switch (filt->type) {
1143 case FILTER_IPV4_5TUPLE:
1144 FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
1146 case FILTER_USNIC_IP:
1148 /* FIXME: this should be a loop */
1149 gp = &filt->u.generic_1;
1150 FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
1151 gp->val_vlan, gp->mask_vlan);
1153 if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
1155 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1156 ? "ip4(y)" : "ip4(n)");
1158 sprintf(ip4, "%s ", "ip4(x)");
1160 if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
/* NOTE(review): bug — the ip6 line tests FILTER_GENERIC_1_IPV4 in
 * val_flags although the mask check above is for IPV6; it should
 * test FILTER_GENERIC_1_IPV6 (copy-paste from the ip4 case).
 */
1162 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1163 ? "ip6(y)" : "ip6(n)");
1165 sprintf(ip6, "%s ", "ip6(x)");
1167 if (gp->mask_flags & FILTER_GENERIC_1_UDP)
1169 (gp->val_flags & FILTER_GENERIC_1_UDP)
1170 ? "udp(y)" : "udp(n)");
1172 sprintf(udp, "%s ", "udp(x)");
1174 if (gp->mask_flags & FILTER_GENERIC_1_TCP)
1176 (gp->val_flags & FILTER_GENERIC_1_TCP)
1177 ? "tcp(y)" : "tcp(n)");
1179 sprintf(tcp, "%s ", "tcp(x)");
1181 if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1182 sprintf(tcpudp, "%s ",
1183 (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1184 ? "tcpudp(y)" : "tcpudp(n)");
1186 sprintf(tcpudp, "%s ", "tcpudp(x)");
1188 if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
1189 sprintf(ip4csum, "%s ",
1190 (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
1191 ? "ip4csum(y)" : "ip4csum(n)");
1193 sprintf(ip4csum, "%s ", "ip4csum(x)");
1195 if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
1196 sprintf(l4csum, "%s ",
1197 (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
1198 ? "l4csum(y)" : "l4csum(n)");
1200 sprintf(l4csum, "%s ", "l4csum(x)");
1202 if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
1203 sprintf(ipfrag, "%s ",
1204 (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
1205 ? "ipfrag(y)" : "ipfrag(n)");
1207 sprintf(ipfrag, "%s ", "ipfrag(x)");
1208 FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
1209 tcp, tcpudp, ip4csum, l4csum, ipfrag);
/* Dump each layer's mask/value up to the highest non-zero mask byte. */
1211 for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
1212 mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
1213 while (mbyte && !gp->layer[i].mask[mbyte])
1219 for (j = 0; j <= mbyte; j++) {
1221 gp->layer[i].mask[j]);
1225 FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
1227 for (j = 0; j <= mbyte; j++) {
1229 gp->layer[i].val[j]);
1233 FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
1237 FLOW_LOG(INFO, "FILTER UNKNOWN\n");
1242 /* Debug function to dump internal NIC flow structures. */
1244 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1246 enic_dump_filter(filt);
1247 enic_dump_actions(ea);
1252 * Internal flow parse/validate function.
1255 * This device pointer.
1256 * @param pattern[in]
1257 * @param actions[in]
1259 * @param enic_filter[out]
1260 * Internal NIC filter structure pointer.
1261 * @param enic_action[out]
1262 * Internal NIC action structure pointer.
1265 enic_flow_parse(struct rte_eth_dev *dev,
1266 const struct rte_flow_attr *attrs,
1267 const struct rte_flow_item pattern[],
1268 const struct rte_flow_action actions[],
1269 struct rte_flow_error *error,
1270 struct filter_v2 *enic_filter,
1271 struct filter_action_v2 *enic_action)
1273 unsigned int ret = 0;
1274 struct enic *enic = pmd_priv(dev);
1275 const struct enic_filter_cap *enic_filter_cap;
1276 const struct enic_action_cap *enic_action_cap;
1277 const struct rte_flow_action *action;
/* Start from clean filter/action output structures. */
1281 memset(enic_filter, 0, sizeof(*enic_filter));
1282 memset(enic_action, 0, sizeof(*enic_action));
/* Argument checks: pattern and actions are mandatory. */
1285 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1286 NULL, "No pattern specified");
1291 rte_flow_error_set(error, EINVAL,
1292 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1293 NULL, "No action specified");
/* Attribute checks: only plain ingress flows are supported — no
 * groups, priorities, or egress.
 */
1299 rte_flow_error_set(error, ENOTSUP,
1300 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1302 "priority groups are not supported");
1304 } else if (attrs->priority) {
1305 rte_flow_error_set(error, ENOTSUP,
1306 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1308 "priorities are not supported");
1310 } else if (attrs->egress) {
1311 rte_flow_error_set(error, ENOTSUP,
1312 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1314 "egress is not supported");
1316 } else if (!attrs->ingress) {
1317 rte_flow_error_set(error, ENOTSUP,
1318 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1320 "only ingress is supported");
1325 rte_flow_error_set(error, EINVAL,
1326 RTE_FLOW_ERROR_TYPE_ATTR,
1327 NULL, "No attribute specified");
1331 /* Verify Actions. */
1332 enic_action_cap = enic_get_action_cap(enic);
1333 for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1335 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1337 else if (!enic_match_action(action, enic_action_cap->actions))
/* Loop exits early on the first unsupported action. */
1340 if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1341 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1342 action, "Invalid action.");
1345 ret = enic_action_cap->copy_fn(actions, enic_action);
1347 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1348 NULL, "Unsupported action.");
1352 /* Verify Flow items. If copying the filter from flow format to enic
1353 * format fails, the flow is not supported
1355 enic_filter_cap = enic_get_filter_cap(enic);
1356 if (enic_filter_cap == NULL) {
1357 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1358 NULL, "Flow API not available");
1361 enic_filter->type = enic->flow_filter_mode;
1362 ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
1363 enic_filter, error);
1368 * Push filter/action to the NIC.
1371 * Device structure pointer.
1372 * @param enic_filter[in]
1373 * Internal NIC filter structure pointer.
1374 * @param enic_action[in]
1375 * Internal NIC action structure pointer.
/*
 * Allocate a flow handle and program the filter/action pair into the NIC
 * via the classifier. On success the handle records the NIC-assigned
 * filter id and a copy of the filter; on classifier failure an error is
 * reported through rte_flow_error_set().
 * NOTE(review): cleanup of the allocated handle on the failure path is not
 * visible in this listing — confirm against the full source.
 */
1378 static struct rte_flow *
1379 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1380 struct filter_action_v2 *enic_action,
1381 struct rte_flow_error *error)
1383 struct rte_flow *flow;
/* Zeroed allocation of the driver-private flow handle. */
1389 flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1391 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1392 NULL, "cannot allocate flow memory");
1396 /* entry[in] is the queue id, entry[out] is the filter Id for delete */
1397 entry = enic_action->rq_idx;
1398 ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
/* Record the filter id the NIC handed back; needed later for delete. */
1401 flow->enic_filter_id = entry;
1402 flow->enic_filter = *enic_filter;
1404 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1405 NULL, "vnic_dev_classifier error");
1413 * Remove filter/action from the NIC.
1416 * Device structure pointer.
1417 * @param filter_id[in]
1419 * @param enic_action[in]
1420 * Internal NIC action structure pointer.
/*
 * Remove a previously-programmed filter from the NIC by its filter id.
 * Reports classifier failures through rte_flow_error_set().
 */
1424 enic_flow_del_filter(struct enic *enic, u16 filter_id,
1425 struct rte_flow_error *error)
1431 ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1433 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1434 NULL, "vnic_dev_classifier failed");
1439 * The following functions are callbacks for Generic flow API.
1443 * Validate a flow supported by the NIC.
1445 * @see rte_flow_validate()
/*
 * rte_flow .validate callback: run the parse/translate step only, without
 * touching hardware. On success the resulting internal structures are
 * dumped to the debug log.
 */
1449 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1450 const struct rte_flow_item pattern[],
1451 const struct rte_flow_action actions[],
1452 struct rte_flow_error *error)
/* Translation targets live on the stack; validate never programs the NIC. */
1454 struct filter_v2 enic_filter;
1455 struct filter_action_v2 enic_action;
1460 ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1461 &enic_filter, &enic_action);
1463 enic_dump_flow(&enic_action, &enic_filter);
1468 * Create a flow supported by the NIC.
1470 * @see rte_flow_create()
/*
 * rte_flow .create callback: parse/translate the flow, then program it
 * into the NIC and link the new handle onto the per-device flow list.
 * The add + list insertion happens under flows_lock so the list and the
 * NIC state stay consistent with concurrent destroy/flush calls.
 */
1473 static struct rte_flow *
1474 enic_flow_create(struct rte_eth_dev *dev,
1475 const struct rte_flow_attr *attrs,
1476 const struct rte_flow_item pattern[],
1477 const struct rte_flow_action actions[],
1478 struct rte_flow_error *error)
1481 struct filter_v2 enic_filter;
1482 struct filter_action_v2 enic_action;
1483 struct rte_flow *flow;
1484 struct enic *enic = pmd_priv(dev);
1488 ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
1493 rte_spinlock_lock(&enic->flows_lock);
1494 flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
/* Track the flow so destroy/flush can find and remove it later. */
1497 LIST_INSERT_HEAD(&enic->flows, flow, next);
1498 rte_spinlock_unlock(&enic->flows_lock);
1504 * Destroy a flow supported by the NIC.
1506 * @see rte_flow_destroy()
/*
 * rte_flow .destroy callback: remove the flow's filter from the NIC and
 * unlink the handle from the per-device list, under flows_lock.
 * NOTE(review): no rte_free(flow) is visible in this listing — if the full
 * source also omits it, the flow handle allocated in enic_flow_add_filter
 * leaks on destroy; confirm against the complete file.
 */
1510 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1511 __rte_unused struct rte_flow_error *error)
1513 struct enic *enic = pmd_priv(dev);
1517 rte_spinlock_lock(&enic->flows_lock);
1518 enic_flow_del_filter(enic, flow->enic_filter_id, error);
1519 LIST_REMOVE(flow, next);
1520 rte_spinlock_unlock(&enic->flows_lock);
1525 * Flush all flows on the device.
1527 * @see rte_flow_flush()
/*
 * rte_flow .flush callback: drain the per-device flow list, deleting each
 * flow's NIC filter and unlinking its handle, all under flows_lock.
 * NOTE(review): as in destroy, no rte_free(flow) is visible here — the
 * handles may leak on flush; confirm against the complete file.
 */
1531 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1533 struct rte_flow *flow;
1534 struct enic *enic = pmd_priv(dev);
1538 rte_spinlock_lock(&enic->flows_lock);
/* Pop-from-head loop: terminates when every flow has been removed. */
1540 while (!LIST_EMPTY(&enic->flows)) {
1541 flow = LIST_FIRST(&enic->flows);
1542 enic_flow_del_filter(enic, flow->enic_filter_id, error);
1543 LIST_REMOVE(flow, next);
1545 rte_spinlock_unlock(&enic->flows_lock);
1550 * Flow callback registration.
1554 const struct rte_flow_ops enic_flow_ops = {
1555 .validate = enic_flow_validate,
1556 .create = enic_flow_create,
1557 .destroy = enic_flow_destroy,
1558 .flush = enic_flow_flush,