/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include "enic_compat.h"
#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
/** Info about how to copy items into enic filters. */
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
        const u8 valid_start_item;
/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** List of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
/* Functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                struct filter_action_v2 *enic_action);
/* Functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst);
/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** List of valid actions */
        const enum rte_flow_action_type *actions;
        /** Copy function for a particular NIC */
        int (*copy_fn)(const struct rte_flow_action actions[],
                struct filter_action_v2 *enic_action);
/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_END,
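/*
 * Illustrative note (editorial, not from the hardware spec): with the v1
 * table above, the only patterns that can validate are IPV4, IPV4/UDP, and
 * IPV4/TCP, since IPv4 is the sole valid start item and UDP/TCP may only
 * follow it. The v1 copy functions below additionally require exact-match
 * specs and masks.
 */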
 * NICs have Advanced Filters capability but they are disabled. This means
 * that layer 3 must be specified.
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_VXLAN,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_ETH,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_ETH,
                        RTE_FLOW_ITEM_TYPE_VLAN,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_ETH,
                        RTE_FLOW_ITEM_TYPE_VLAN,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_UDP,
                        RTE_FLOW_ITEM_TYPE_END,
/** NICs with Advanced Filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_VXLAN,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_ETH,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_ETH,
                        RTE_FLOW_ITEM_TYPE_VLAN,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_ETH,
                        RTE_FLOW_ITEM_TYPE_VLAN,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6,
                        RTE_FLOW_ITEM_TYPE_END,
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                        RTE_FLOW_ITEM_TYPE_UDP,
                        RTE_FLOW_ITEM_TYPE_END,
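/*
 * Illustrative note: unlike the v2 table, every entry above has
 * valid_start_item = 1, so with Advanced Filters enabled a pattern may
 * start at any layer, e.g. a bare UDP or VXLAN item with no preceding
 * ETH/IPv4 items.
 */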
/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .item_info = enic_items_v3,
/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_END,
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
mask_exact_match(const u8 *supported, const u8 *supplied,
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
 * Copy IPv4 item into version 1 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");

        /* Check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                        (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;
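/*
 * Illustrative example (hypothetical addresses): an item that passes the
 * checks above must supply both addresses and the all-ones default mask:
 *
 *	struct rte_flow_item_ipv4 spec = {
 *		.hdr = { .src_addr = rte_cpu_to_be_32(0x0a000001),
 *			 .dst_addr = rte_cpu_to_be_32(0x0a000002) },
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &spec,
 *		.mask = &rte_flow_item_ipv4_mask,
 *	};
 */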
 * Copy UDP item into version 1 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");

        /* Check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                        (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;
 * Copy TCP item into version 1 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");

        /* Check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                        (const u8 *)item->mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;
 * Copy ETH item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        /* Match all if no spec */
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                        sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                        sizeof(struct ether_hdr));
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                        FILTER_GENERIC_1_KEY_LEN)
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                        &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                        &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
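/*
 * Illustrative note on the generic filter layout assumed by the copy
 * functions: the key has one fixed slot per outer layer (L2, L3, L4),
 * while everything after the VXLAN header is matched as raw bytes in the
 * L5 slot. An inner Ethernet header therefore lands at *inner_ofst within
 * L5, and *inner_ofst advances so a following inner item appends right
 * after it.
 */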
 * Copy VLAN item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        /* Match all if no spec */
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;

                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                        FILTER_GENERIC_1_KEY_LEN)
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                        mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                        spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
 * Copy IPv4 item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the inner IPv4 header begins.
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        if (*inner_ofst == 0) {
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                        sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                        sizeof(struct ipv4_hdr));
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                        FILTER_GENERIC_1_KEY_LEN)
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                        mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                        spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
 * Copy IPv6 item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the inner IPv6 header begins.
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                        sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                        sizeof(struct ipv6_hdr));
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                        FILTER_GENERIC_1_KEY_LEN)
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                        mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                        spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
 * Copy UDP item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the inner UDP header begins.
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                        sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                        sizeof(struct udp_hdr));
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                        FILTER_GENERIC_1_KEY_LEN)
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                        mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                        spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
 * Copy TCP item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the inner TCP header begins.
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                        sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                        sizeof(struct tcp_hdr));
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                        FILTER_GENERIC_1_KEY_LEN)
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                        mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                        spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
 * Copy SCTP item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        /* Match all if no spec */
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                sizeof(struct sctp_hdr));
 * Copy VXLAN item into version 2 NIC filter.
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VxLAN headers always start at the beginning of L5.
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                struct filter_v2 *enic_filter, u8 *inner_ofst)
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        /* Match all if no spec */
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
                sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
                sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
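/*
 * Illustrative note: setting *inner_ofst here is what switches the
 * ETH/VLAN/IPv4/IPv6/UDP/TCP copy functions into their inner-header paths,
 * so a hypothetical pattern such as eth / ipv4 / udp / vxlan / eth / ipv4
 * lays the two inner headers out back to back in the L5 key, immediately
 * after the VXLAN header.
 */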
 * Return 1 if current item is valid on top of the previous one.
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
item_stacking_valid(enum rte_flow_item_type prev_item,
                const struct enic_items *item_info, u8 is_first_item)
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)

        /* This is the first item in the stack. Check if that's cool */
        if (is_first_item && item_info->valid_start_item)
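/*
 * Worked example (hypothetical pattern eth / ipv4 / udp against the v3
 * table): ETH is first and has valid_start_item = 1, so it passes the
 * first-item check; IPV4 then finds ETH in its prev_items list, and UDP
 * finds IPV4 in its list, so the whole stack validates.
 */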
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filters derived from the pattern.
enic_copy_filter(const struct rte_flow_item pattern[],
                const struct enic_items *items_info,
                struct filter_v2 *enic_filter,
                struct rte_flow_error *error)
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        u8 is_first_item = 1;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned, the NIC does not support the item.
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                item_info = &items_info[item->type];

                /* Check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                        goto item_not_supported;
                prev_item = item->type;
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "enic type error");
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "stacking error");
 * Build the internal version 1 NIC action structure from the provided actions.
 * The actions are validated as they are copied.
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
enic_copy_action_v1(const struct rte_flow_action actions[],
                struct filter_action_v2 *enic_action)
        uint32_t overlap = 0;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
        if (!(overlap & FATE))
        enic_action->type = FILTER_ACTION_RQ_STEERING;
 * Build the internal version 2 NIC action structure from the provided actions.
 * The actions are validated as they are copied.
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
enic_copy_action_v2(const struct rte_flow_action actions[],
                struct filter_action_v2 *enic_action)
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                        /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
                         * in the range of allowed mark ids.
                        if (mark->id >= ENIC_MAGIC_FILTER_ID)
                        enic_action->filter_id = mark->id;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                case RTE_FLOW_ACTION_TYPE_VOID:
        if (!(overlap & FATE))
        enic_action->type = FILTER_ACTION_V2;
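/*
 * Illustrative example (hypothetical queue index and mark id): an actions
 * array of QUEUE + MARK, e.g.
 *
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * would set FILTER_ACTION_RQ_STEERING_FLAG | FILTER_ACTION_FILTER_ID_FLAG
 * with filter_id = 42.
 */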
/** Check if the action is supported */
enic_match_action(const struct rte_flow_action *action,
                const enum rte_flow_action_type *supported_actions)
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];
/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
        const struct enic_action_cap *ea;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
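/*
 * Illustrative note: the checks above pick the richest capability the
 * firmware advertises, so a NIC reporting FILTER_ACTION_DROP_FLAG also
 * gets MARK/FLAG/QUEUE support via enic_supported_actions_v2_drop.
 */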
/* Debug function to dump internal NIC action structure. */
enic_dump_actions(const struct filter_action_v2 *ea)
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                                enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
/* Debug function to dump internal NIC filter structure. */
enic_dump_filter(const struct filter_v2 *filt)
        const struct filter_generic_1 *gp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
        case FILTER_USNIC_IP:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                        gp->val_vlan, gp->mask_vlan);
                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                        sprintf(ip6, "%s ", "ip6(x)");
                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                        tcp, tcpudp, ip4csum, l4csum, ipfrag);
                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                        for (j = 0; j <= mbyte; j++) {
                                        gp->layer[i].mask[j]);
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        for (j = 0; j <= mbyte; j++) {
                                        gp->layer[i].val[j]);
                        FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
/* Debug function to dump internal NIC flow structures. */
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
        enic_dump_filter(filt);
        enic_dump_actions(ea);
 * Internal flow parse/validate function.
 *   This device pointer.
 * @param pattern[in]
 * @param actions[in]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
enic_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct filter_v2 *enic_filter,
                struct filter_action_v2 *enic_action)
        unsigned int ret = 0;
        struct enic *enic = pmd_priv(dev);
        const struct enic_filter_cap *enic_filter_cap;
        const struct enic_action_cap *enic_action_cap;
        const struct rte_flow_action *action;

        memset(enic_filter, 0, sizeof(*enic_filter));
        memset(enic_action, 0, sizeof(*enic_action));

                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "No pattern specified");
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                        NULL, "No action specified");
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                "priority groups are not supported");
                } else if (attrs->priority) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                "priorities are not supported");
                } else if (attrs->egress) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                "egress is not supported");
                } else if (attrs->transfer) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                "transfer is not supported");
                } else if (!attrs->ingress) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                "only ingress is supported");
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR,
                        NULL, "No attribute specified");

        /* Verify Actions. */
        enic_action_cap = enic_get_action_cap(enic);
        for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                else if (!enic_match_action(action, enic_action_cap->actions))
        if (action->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
                        action, "Invalid action.");
        ret = enic_action_cap->copy_fn(actions, enic_action);
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                        NULL, "Unsupported action.");

        /* Verify flow items. If copying the filter from flow format to enic
         * format fails, the flow is not supported.
        enic_filter_cap = enic_get_filter_cap(enic);
        if (enic_filter_cap == NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                        NULL, "Flow API not available");
        enic_filter->type = enic->flow_filter_mode;
        ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
                        enic_filter, error);
 * Push filter/action to the NIC.
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                struct filter_action_v2 *enic_action,
                struct rte_flow_error *error)
        struct rte_flow *flow;

        flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                        NULL, "cannot allocate flow memory");

        /* entry[in] is the queue ID, entry[out] is the filter ID for delete */
        entry = enic_action->rq_idx;
        ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                flow->enic_filter_id = entry;
                flow->enic_filter = *enic_filter;
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                        NULL, "vnic_dev_classifier error");
 * Remove filter/action from the NIC.
 *   Device structure pointer.
 * @param filter_id[in]
enic_flow_del_filter(struct enic *enic, u16 filter_id,
                struct rte_flow_error *error)
        ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                        NULL, "vnic_dev_classifier failed");
 * The following functions are callbacks for Generic flow API.
 * Validate a flow supported by the NIC.
 * @see rte_flow_validate()
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                        &enic_filter, &enic_action);
                enic_dump_flow(&enic_action, &enic_filter);
 * Create a flow supported by the NIC.
 * @see rte_flow_create()
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
        rte_spinlock_lock(&enic->flows_lock);
        flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
                LIST_INSERT_HEAD(&enic->flows, flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
 * Destroy a flow supported by the NIC.
 * @see rte_flow_destroy()
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                __rte_unused struct rte_flow_error *error)
        struct enic *enic = pmd_priv(dev);

        rte_spinlock_lock(&enic->flows_lock);
        enic_flow_del_filter(enic, flow->enic_filter_id, error);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
 * Flush all flows on the device.
 * @see rte_flow_flush()
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        rte_spinlock_lock(&enic->flows_lock);

        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                enic_flow_del_filter(enic, flow->enic_filter_id, error);
                LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
 * Flow callback registration.
const struct rte_flow_ops enic_flow_ops = {
        .validate = enic_flow_validate,
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
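/*
 * Illustrative application-side usage (a minimal sketch, not part of this
 * driver; port_id, queue index, and pattern are hypothetical):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 *
 * rte_flow_create() reaches enic_flow_create() through enic_flow_ops, which
 * parses the pattern/actions and pushes the resulting filter to the NIC.
 */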