1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
8 #include <rte_ethdev_driver.h>
9 #include <rte_flow_driver.h>
10 #include <rte_ether.h>
14 #include "enic_compat.h"
// Logging helpers for the flow-API path; both emit via the dynamic log type
// enicpmd_logtype_flow (FLOW_TRACE always at DEBUG level, FLOW_LOG at the
// caller-chosen level).
19 #define FLOW_TRACE() \
20 rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
22 #define FLOW_LOG(level, fmt, args...) \
23 rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
26 /** Info about how to copy items into enic filters. */
28 /** Function for copying and validating an item. */
// Returns 0 on success; a non-zero errno-style code on failure (callers in
// enic_copy_filter() treat non-zero as "item not supported").
29 int (*copy_item)(const struct rte_flow_item *item,
30 struct filter_v2 *enic_filter, u8 *inner_ofst);
31 /** List of valid previous items. */
// Terminated by RTE_FLOW_ITEM_TYPE_END; consumed by item_stacking_valid().
32 const enum rte_flow_item_type * const prev_items;
33 /** True if it's OK for this item to be the first item. For some NIC
34 * versions, it's invalid to start the stack above layer 3.
36 const u8 valid_start_item;
39 /** Filtering capabilities for various NIC and firmware versions. */
40 struct enic_filter_cap {
41 /** list of valid items and their handlers and attributes. */
// Indexed by enum rte_flow_item_type (see enic_items_v1/v2/v3 below).
42 const struct enic_items *item_info;
45 /* functions for copying flow actions into enic actions */
// Signature shared by enic_copy_action_v1/v2; returns 0 on success.
46 typedef int (copy_action_fn)(const struct rte_flow_action actions[],
47 struct filter_action_v2 *enic_action);
49 /* functions for copying items into enic filters */
// Signature shared by all enic_copy_item_*_v* handlers; inner_ofst tracks
// the byte offset into the L5 key for encapsulated (inner) headers.
50 typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
51 struct filter_v2 *enic_filter, u8 *inner_ofst);
53 /** Action capabilities for various NICs. */
54 struct enic_action_cap {
55 /** list of valid actions */
// RTE_FLOW_ACTION_TYPE_END-terminated list; consumed by enic_match_action().
56 const enum rte_flow_action_type *actions;
57 /** copy function for a particular NIC */
58 int (*copy_fn)(const struct rte_flow_action actions[],
59 struct filter_action_v2 *enic_action);
62 /* Forward declarations */
// One copy handler per supported pattern item, per filter generation (v1 =
// 5-tuple filters, v2 = generic filters), plus the two action copiers.
// Note: a duplicate declaration of enic_copy_item_sctp_v2 was removed here.
63 static enic_copy_item_fn enic_copy_item_ipv4_v1;
64 static enic_copy_item_fn enic_copy_item_udp_v1;
65 static enic_copy_item_fn enic_copy_item_tcp_v1;
66 static enic_copy_item_fn enic_copy_item_eth_v2;
67 static enic_copy_item_fn enic_copy_item_vlan_v2;
68 static enic_copy_item_fn enic_copy_item_ipv4_v2;
69 static enic_copy_item_fn enic_copy_item_ipv6_v2;
70 static enic_copy_item_fn enic_copy_item_udp_v2;
71 static enic_copy_item_fn enic_copy_item_tcp_v2;
72 static enic_copy_item_fn enic_copy_item_sctp_v2;
74 static enic_copy_item_fn enic_copy_item_vxlan_v2;
75 static copy_action_fn enic_copy_action_v1;
76 static copy_action_fn enic_copy_action_v2;
79 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
82 static const struct enic_items enic_items_v1[] = {
// v1 (FILTER_IPV4_5TUPLE): only IPv4 may start the pattern; UDP/TCP must
// follow IPv4. No L2 or tunnel matching.
83 [RTE_FLOW_ITEM_TYPE_IPV4] = {
84 .copy_item = enic_copy_item_ipv4_v1,
85 .valid_start_item = 1,
86 .prev_items = (const enum rte_flow_item_type[]) {
87 RTE_FLOW_ITEM_TYPE_END,
90 [RTE_FLOW_ITEM_TYPE_UDP] = {
91 .copy_item = enic_copy_item_udp_v1,
92 .valid_start_item = 0,
93 .prev_items = (const enum rte_flow_item_type[]) {
94 RTE_FLOW_ITEM_TYPE_IPV4,
95 RTE_FLOW_ITEM_TYPE_END,
98 [RTE_FLOW_ITEM_TYPE_TCP] = {
99 .copy_item = enic_copy_item_tcp_v1,
100 .valid_start_item = 0,
101 .prev_items = (const enum rte_flow_item_type[]) {
102 RTE_FLOW_ITEM_TYPE_IPV4,
103 RTE_FLOW_ITEM_TYPE_END,
109 * NICs have Advanced Filters capability but they are disabled. This means
110 * that layer 3 must be specified.
112 static const struct enic_items enic_items_v2[] = {
// v2 (FILTER_USNIC_IP): generic filters, but L4 items cannot start the
// pattern (valid_start_item = 0 for UDP/TCP/SCTP/VXLAN).
113 [RTE_FLOW_ITEM_TYPE_ETH] = {
114 .copy_item = enic_copy_item_eth_v2,
115 .valid_start_item = 1,
116 .prev_items = (const enum rte_flow_item_type[]) {
// ETH may also appear as the inner header following VXLAN.
117 RTE_FLOW_ITEM_TYPE_VXLAN,
118 RTE_FLOW_ITEM_TYPE_END,
121 [RTE_FLOW_ITEM_TYPE_VLAN] = {
122 .copy_item = enic_copy_item_vlan_v2,
123 .valid_start_item = 1,
124 .prev_items = (const enum rte_flow_item_type[]) {
125 RTE_FLOW_ITEM_TYPE_ETH,
126 RTE_FLOW_ITEM_TYPE_END,
129 [RTE_FLOW_ITEM_TYPE_IPV4] = {
130 .copy_item = enic_copy_item_ipv4_v2,
131 .valid_start_item = 1,
132 .prev_items = (const enum rte_flow_item_type[]) {
133 RTE_FLOW_ITEM_TYPE_ETH,
134 RTE_FLOW_ITEM_TYPE_VLAN,
135 RTE_FLOW_ITEM_TYPE_END,
138 [RTE_FLOW_ITEM_TYPE_IPV6] = {
139 .copy_item = enic_copy_item_ipv6_v2,
140 .valid_start_item = 1,
141 .prev_items = (const enum rte_flow_item_type[]) {
142 RTE_FLOW_ITEM_TYPE_ETH,
143 RTE_FLOW_ITEM_TYPE_VLAN,
144 RTE_FLOW_ITEM_TYPE_END,
147 [RTE_FLOW_ITEM_TYPE_UDP] = {
148 .copy_item = enic_copy_item_udp_v2,
149 .valid_start_item = 0,
150 .prev_items = (const enum rte_flow_item_type[]) {
151 RTE_FLOW_ITEM_TYPE_IPV4,
152 RTE_FLOW_ITEM_TYPE_IPV6,
153 RTE_FLOW_ITEM_TYPE_END,
156 [RTE_FLOW_ITEM_TYPE_TCP] = {
157 .copy_item = enic_copy_item_tcp_v2,
158 .valid_start_item = 0,
159 .prev_items = (const enum rte_flow_item_type[]) {
160 RTE_FLOW_ITEM_TYPE_IPV4,
161 RTE_FLOW_ITEM_TYPE_IPV6,
162 RTE_FLOW_ITEM_TYPE_END,
165 [RTE_FLOW_ITEM_TYPE_SCTP] = {
166 .copy_item = enic_copy_item_sctp_v2,
167 .valid_start_item = 0,
168 .prev_items = (const enum rte_flow_item_type[]) {
169 RTE_FLOW_ITEM_TYPE_IPV4,
170 RTE_FLOW_ITEM_TYPE_IPV6,
171 RTE_FLOW_ITEM_TYPE_END,
174 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
175 .copy_item = enic_copy_item_vxlan_v2,
176 .valid_start_item = 0,
177 .prev_items = (const enum rte_flow_item_type[]) {
178 RTE_FLOW_ITEM_TYPE_UDP,
179 RTE_FLOW_ITEM_TYPE_END,
184 /** NICs with Advanced filters enabled */
185 static const struct enic_items enic_items_v3[] = {
// v3: same handlers as v2, but every item (including L4 and VXLAN) may
// start the pattern (valid_start_item = 1 throughout).
186 [RTE_FLOW_ITEM_TYPE_ETH] = {
187 .copy_item = enic_copy_item_eth_v2,
188 .valid_start_item = 1,
189 .prev_items = (const enum rte_flow_item_type[]) {
190 RTE_FLOW_ITEM_TYPE_VXLAN,
191 RTE_FLOW_ITEM_TYPE_END,
194 [RTE_FLOW_ITEM_TYPE_VLAN] = {
195 .copy_item = enic_copy_item_vlan_v2,
196 .valid_start_item = 1,
197 .prev_items = (const enum rte_flow_item_type[]) {
198 RTE_FLOW_ITEM_TYPE_ETH,
199 RTE_FLOW_ITEM_TYPE_END,
202 [RTE_FLOW_ITEM_TYPE_IPV4] = {
203 .copy_item = enic_copy_item_ipv4_v2,
204 .valid_start_item = 1,
205 .prev_items = (const enum rte_flow_item_type[]) {
206 RTE_FLOW_ITEM_TYPE_ETH,
207 RTE_FLOW_ITEM_TYPE_VLAN,
208 RTE_FLOW_ITEM_TYPE_END,
211 [RTE_FLOW_ITEM_TYPE_IPV6] = {
212 .copy_item = enic_copy_item_ipv6_v2,
213 .valid_start_item = 1,
214 .prev_items = (const enum rte_flow_item_type[]) {
215 RTE_FLOW_ITEM_TYPE_ETH,
216 RTE_FLOW_ITEM_TYPE_VLAN,
217 RTE_FLOW_ITEM_TYPE_END,
220 [RTE_FLOW_ITEM_TYPE_UDP] = {
221 .copy_item = enic_copy_item_udp_v2,
222 .valid_start_item = 1,
223 .prev_items = (const enum rte_flow_item_type[]) {
224 RTE_FLOW_ITEM_TYPE_IPV4,
225 RTE_FLOW_ITEM_TYPE_IPV6,
226 RTE_FLOW_ITEM_TYPE_END,
229 [RTE_FLOW_ITEM_TYPE_TCP] = {
230 .copy_item = enic_copy_item_tcp_v2,
231 .valid_start_item = 1,
232 .prev_items = (const enum rte_flow_item_type[]) {
233 RTE_FLOW_ITEM_TYPE_IPV4,
234 RTE_FLOW_ITEM_TYPE_IPV6,
235 RTE_FLOW_ITEM_TYPE_END,
238 [RTE_FLOW_ITEM_TYPE_SCTP] = {
239 .copy_item = enic_copy_item_sctp_v2,
240 .valid_start_item = 1,
241 .prev_items = (const enum rte_flow_item_type[]) {
242 RTE_FLOW_ITEM_TYPE_IPV4,
243 RTE_FLOW_ITEM_TYPE_IPV6,
244 RTE_FLOW_ITEM_TYPE_END,
247 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
248 .copy_item = enic_copy_item_vxlan_v2,
249 .valid_start_item = 1,
250 .prev_items = (const enum rte_flow_item_type[]) {
251 RTE_FLOW_ITEM_TYPE_UDP,
252 RTE_FLOW_ITEM_TYPE_END,
257 /** Filtering capabilities indexed by this NIC's supported filter type. */
258 static const struct enic_filter_cap enic_filter_cap[] = {
259 [FILTER_IPV4_5TUPLE] = {
260 .item_info = enic_items_v1,
262 [FILTER_USNIC_IP] = {
263 .item_info = enic_items_v2,
// NOTE(review): the index for the v3 entry is not visible in this chunk;
// presumably FILTER_DPDK_1 (advanced filters) — confirm against full source.
266 .item_info = enic_items_v3,
270 /** Supported actions for older NICs */
271 static const enum rte_flow_action_type enic_supported_actions_v1[] = {
272 RTE_FLOW_ACTION_TYPE_QUEUE,
273 RTE_FLOW_ACTION_TYPE_END,
276 /** Supported actions for newer NICs */
// Each successive table is a strict superset of the previous one; the
// selection among them happens in enic_get_action_cap() below.
277 static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
278 RTE_FLOW_ACTION_TYPE_QUEUE,
279 RTE_FLOW_ACTION_TYPE_MARK,
280 RTE_FLOW_ACTION_TYPE_FLAG,
281 RTE_FLOW_ACTION_TYPE_END,
284 static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
285 RTE_FLOW_ACTION_TYPE_QUEUE,
286 RTE_FLOW_ACTION_TYPE_MARK,
287 RTE_FLOW_ACTION_TYPE_FLAG,
288 RTE_FLOW_ACTION_TYPE_DROP,
289 RTE_FLOW_ACTION_TYPE_END,
292 static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
293 RTE_FLOW_ACTION_TYPE_QUEUE,
294 RTE_FLOW_ACTION_TYPE_MARK,
295 RTE_FLOW_ACTION_TYPE_FLAG,
296 RTE_FLOW_ACTION_TYPE_DROP,
297 RTE_FLOW_ACTION_TYPE_COUNT,
298 RTE_FLOW_ACTION_TYPE_END,
301 /** Action capabilities indexed by NIC version information */
302 static const struct enic_action_cap enic_action_cap[] = {
// Indexed by the FILTER_ACTION_*_FLAG capability bits reported by firmware.
303 [FILTER_ACTION_RQ_STEERING_FLAG] = {
304 .actions = enic_supported_actions_v1,
305 .copy_fn = enic_copy_action_v1,
307 [FILTER_ACTION_FILTER_ID_FLAG] = {
308 .actions = enic_supported_actions_v2_id,
309 .copy_fn = enic_copy_action_v2,
311 [FILTER_ACTION_DROP_FLAG] = {
312 .actions = enic_supported_actions_v2_drop,
313 .copy_fn = enic_copy_action_v2,
315 [FILTER_ACTION_COUNTER_FLAG] = {
316 .actions = enic_supported_actions_v2_count,
317 .copy_fn = enic_copy_action_v2,
// Byte-wise comparison of a supplied mask against the only mask the
// hardware supports; used by the v1 (5-tuple) copy handlers.
322 mask_exact_match(const u8 *supported, const u8 *supplied,
326 for (i = 0; i < size; i++) {
327 if (supported[i] != supplied[i])
334 * Copy IPv4 item into version 1 NIC filter.
337 * Item specification.
338 * @param enic_filter[out]
339 * Partially filled in NIC filter structure.
340 * @param inner_ofst[in]
341 * Should always be 0 for version 1.
344 enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
345 struct filter_v2 *enic_filter, u8 *inner_ofst)
347 const struct rte_flow_item_ipv4 *spec = item->spec;
348 const struct rte_flow_item_ipv4 *mask = item->mask;
349 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
// The only IPv4 mask v1 hardware supports: full match on both addresses.
350 struct ipv4_hdr supported_mask = {
351 .src_addr = 0xffffffff,
352 .dst_addr = 0xffffffff,
361 mask = &rte_flow_item_ipv4_mask;
363 /* This is an exact match filter, both fields must be set */
364 if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
365 FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
369 /* check that the supplied mask exactly matches capability */
// NOTE(review): this compares against item->mask, not the local `mask`
// that may have been defaulted above — if item->mask can be NULL here,
// mask_exact_match() would dereference NULL; confirm against full source.
370 if (!mask_exact_match((const u8 *)&supported_mask,
371 (const u8 *)item->mask, sizeof(*mask))) {
372 FLOW_LOG(ERR, "IPv4 exact match mask");
376 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
377 enic_5tup->src_addr = spec->hdr.src_addr;
378 enic_5tup->dst_addr = spec->hdr.dst_addr;
384 * Copy UDP item into version 1 NIC filter.
387 * Item specification.
388 * @param enic_filter[out]
389 * Partially filled in NIC filter structure.
390 * @param inner_ofst[in]
391 * Should always be 0 for version 1.
394 enic_copy_item_udp_v1(const struct rte_flow_item *item,
395 struct filter_v2 *enic_filter, u8 *inner_ofst)
397 const struct rte_flow_item_udp *spec = item->spec;
398 const struct rte_flow_item_udp *mask = item->mask;
399 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
400 struct udp_hdr supported_mask = {
411 mask = &rte_flow_item_udp_mask;
413 /* This is an exact match filter, both ports must be set */
414 if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
415 FLOW_LOG(ERR, "UDP exact match src/dst addr");
419 /* check that the supplied mask exactly matches capability */
// NOTE(review): compares item->mask (possibly NULL), not the defaulted
// local `mask` — same concern as the IPv4 v1 handler; verify upstream.
420 if (!mask_exact_match((const u8 *)&supported_mask,
421 (const u8 *)item->mask, sizeof(*mask))) {
422 FLOW_LOG(ERR, "UDP exact match mask");
426 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
427 enic_5tup->src_port = spec->hdr.src_port;
428 enic_5tup->dst_port = spec->hdr.dst_port;
429 enic_5tup->protocol = PROTO_UDP;
435 * Copy TCP item into version 1 NIC filter.
438 * Item specification.
439 * @param enic_filter[out]
440 * Partially filled in NIC filter structure.
441 * @param inner_ofst[in]
442 * Should always be 0 for version 1.
445 enic_copy_item_tcp_v1(const struct rte_flow_item *item,
446 struct filter_v2 *enic_filter, u8 *inner_ofst)
448 const struct rte_flow_item_tcp *spec = item->spec;
449 const struct rte_flow_item_tcp *mask = item->mask;
450 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
451 struct tcp_hdr supported_mask = {
462 mask = &rte_flow_item_tcp_mask;
464 /* This is an exact match filter, both ports must be set */
465 if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
466 FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr");
470 /* check that the supplied mask exactly matches capability */
// NOTE(review): same item->mask-vs-local-mask concern as the other v1
// handlers; verify against the full source.
471 if (!mask_exact_match((const u8 *)&supported_mask,
472 (const u8 *)item->mask, sizeof(*mask))) {
473 FLOW_LOG(ERR, "TCP exact match mask");
477 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
478 enic_5tup->src_port = spec->hdr.src_port;
479 enic_5tup->dst_port = spec->hdr.dst_port;
480 enic_5tup->protocol = PROTO_TCP;
486 * Copy ETH item into version 2 NIC filter.
489 * Item specification.
490 * @param enic_filter[out]
491 * Partially filled in NIC filter structure.
492 * @param inner_ofst[in]
493 * If zero, this is an outer header. If non-zero, this is the offset into L5
494 * where the header begins.
497 enic_copy_item_eth_v2(const struct rte_flow_item *item,
498 struct filter_v2 *enic_filter, u8 *inner_ofst)
500 struct ether_hdr enic_spec;
501 struct ether_hdr enic_mask;
502 const struct rte_flow_item_eth *spec = item->spec;
503 const struct rte_flow_item_eth *mask = item->mask;
504 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
508 /* Match all if no spec */
513 mask = &rte_flow_item_eth_mask;
// Re-pack the rte_flow_item_eth fields into the on-the-wire ether_hdr
// layout the firmware expects (dst, src, ether_type).
515 memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
517 memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
520 memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
522 memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
524 enic_spec.ether_type = spec->type;
525 enic_mask.ether_type = mask->type;
// Outer header goes in the L2 layer slot; inner (post-VXLAN) header goes
// into L5 at *inner_ofst, which is then advanced past this header.
527 if (*inner_ofst == 0) {
529 memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
530 sizeof(struct ether_hdr));
531 memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
532 sizeof(struct ether_hdr));
535 if ((*inner_ofst + sizeof(struct ether_hdr)) >
536 FILTER_GENERIC_1_KEY_LEN)
538 /* Offset into L5 where inner Ethernet header goes */
539 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
540 &enic_mask, sizeof(struct ether_hdr));
541 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
542 &enic_spec, sizeof(struct ether_hdr));
543 *inner_ofst += sizeof(struct ether_hdr);
549 * Copy VLAN item into version 2 NIC filter.
552 * Item specification.
553 * @param enic_filter[out]
554 * Partially filled in NIC filter structure.
555 * @param inner_ofst[in]
556 * If zero, this is an outer header. If non-zero, this is the offset into L5
557 * where the header begins.
560 enic_copy_item_vlan_v2(const struct rte_flow_item *item,
561 struct filter_v2 *enic_filter, u8 *inner_ofst)
563 const struct rte_flow_item_vlan *spec = item->spec;
564 const struct rte_flow_item_vlan *mask = item->mask;
565 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
569 /* Match all if no spec */
574 mask = &rte_flow_item_vlan_mask;
576 if (*inner_ofst == 0) {
// For the outer header, the encapsulated ethertype moves into the L2
// ether_hdr slot already written by the ETH handler, and the TCI uses the
// filter's dedicated vlan fields.
577 struct ether_hdr *eth_mask =
578 (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
579 struct ether_hdr *eth_val =
580 (void *)gp->layer[FILTER_GENERIC_1_L2].val;
582 /* Outer TPID cannot be matched */
583 if (eth_mask->ether_type)
585 eth_mask->ether_type = mask->inner_type;
586 eth_val->ether_type = spec->inner_type;
588 /* Outer header. Use the vlan mask/val fields */
589 gp->mask_vlan = mask->tci;
590 gp->val_vlan = spec->tci;
592 /* Inner header. Mask/Val start at *inner_ofst into L5 */
593 if ((*inner_ofst + sizeof(struct vlan_hdr)) >
594 FILTER_GENERIC_1_KEY_LEN)
// NOTE(review): this copies the rte_flow_item_vlan struct as if it were an
// on-the-wire vlan_hdr; confirm the two layouts match for the fields used.
596 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
597 mask, sizeof(struct vlan_hdr));
598 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
599 spec, sizeof(struct vlan_hdr));
600 *inner_ofst += sizeof(struct vlan_hdr);
606 * Copy IPv4 item into version 2 NIC filter.
609 * Item specification.
610 * @param enic_filter[out]
611 * Partially filled in NIC filter structure.
612 * @param inner_ofst[in]
613 * Must be 0. Don't support inner IPv4 filtering.
616 enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
617 struct filter_v2 *enic_filter, u8 *inner_ofst)
619 const struct rte_flow_item_ipv4 *spec = item->spec;
620 const struct rte_flow_item_ipv4 *mask = item->mask;
621 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
625 if (*inner_ofst == 0) {
// Outer IPv4: request the protocol-type match bits in addition to the
// byte-wise L3 header match below.
627 gp->mask_flags |= FILTER_GENERIC_1_IPV4;
628 gp->val_flags |= FILTER_GENERIC_1_IPV4;
630 /* Match all if no spec */
635 mask = &rte_flow_item_ipv4_mask;
637 memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
638 sizeof(struct ipv4_hdr));
639 memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
640 sizeof(struct ipv4_hdr));
642 /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
643 if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
644 FILTER_GENERIC_1_KEY_LEN)
646 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
647 mask, sizeof(struct ipv4_hdr));
648 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
649 spec, sizeof(struct ipv4_hdr));
650 *inner_ofst += sizeof(struct ipv4_hdr);
656 * Copy IPv6 item into version 2 NIC filter.
659 * Item specification.
660 * @param enic_filter[out]
661 * Partially filled in NIC filter structure.
662 * @param inner_ofst[in]
663 * Must be 0. Don't support inner IPv6 filtering.
666 enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
667 struct filter_v2 *enic_filter, u8 *inner_ofst)
669 const struct rte_flow_item_ipv6 *spec = item->spec;
670 const struct rte_flow_item_ipv6 *mask = item->mask;
671 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
// Unlike the IPv4 handler, the protocol-type flag bits here are set
// unconditionally (for outer and inner alike), as visible below.
676 gp->mask_flags |= FILTER_GENERIC_1_IPV6;
677 gp->val_flags |= FILTER_GENERIC_1_IPV6;
679 /* Match all if no spec */
684 mask = &rte_flow_item_ipv6_mask;
686 if (*inner_ofst == 0) {
687 memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
688 sizeof(struct ipv6_hdr));
689 memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
690 sizeof(struct ipv6_hdr));
692 /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
693 if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
694 FILTER_GENERIC_1_KEY_LEN)
696 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
697 mask, sizeof(struct ipv6_hdr));
698 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
699 spec, sizeof(struct ipv6_hdr));
700 *inner_ofst += sizeof(struct ipv6_hdr);
706 * Copy UDP item into version 2 NIC filter.
709 * Item specification.
710 * @param enic_filter[out]
711 * Partially filled in NIC filter structure.
712 * @param inner_ofst[in]
713 * Must be 0. Don't support inner UDP filtering.
716 enic_copy_item_udp_v2(const struct rte_flow_item *item,
717 struct filter_v2 *enic_filter, u8 *inner_ofst)
719 const struct rte_flow_item_udp *spec = item->spec;
720 const struct rte_flow_item_udp *mask = item->mask;
721 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
726 gp->mask_flags |= FILTER_GENERIC_1_UDP;
727 gp->val_flags |= FILTER_GENERIC_1_UDP;
729 /* Match all if no spec */
734 mask = &rte_flow_item_udp_mask;
736 if (*inner_ofst == 0) {
737 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
738 sizeof(struct udp_hdr));
739 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
740 sizeof(struct udp_hdr));
742 /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
743 if ((*inner_ofst + sizeof(struct udp_hdr)) >
744 FILTER_GENERIC_1_KEY_LEN)
746 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
747 mask, sizeof(struct udp_hdr));
748 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
749 spec, sizeof(struct udp_hdr));
750 *inner_ofst += sizeof(struct udp_hdr);
756 * Copy TCP item into version 2 NIC filter.
759 * Item specification.
760 * @param enic_filter[out]
761 * Partially filled in NIC filter structure.
762 * @param inner_ofst[in]
763 * Must be 0. Don't support inner TCP filtering.
766 enic_copy_item_tcp_v2(const struct rte_flow_item *item,
767 struct filter_v2 *enic_filter, u8 *inner_ofst)
769 const struct rte_flow_item_tcp *spec = item->spec;
770 const struct rte_flow_item_tcp *mask = item->mask;
771 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
776 gp->mask_flags |= FILTER_GENERIC_1_TCP;
777 gp->val_flags |= FILTER_GENERIC_1_TCP;
779 /* Match all if no spec */
786 if (*inner_ofst == 0) {
787 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
788 sizeof(struct tcp_hdr));
789 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
790 sizeof(struct tcp_hdr));
792 /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
793 if ((*inner_ofst + sizeof(struct tcp_hdr)) >
794 FILTER_GENERIC_1_KEY_LEN)
796 memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
797 mask, sizeof(struct tcp_hdr));
798 memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
799 spec, sizeof(struct tcp_hdr));
800 *inner_ofst += sizeof(struct tcp_hdr);
806 * Copy SCTP item into version 2 NIC filter.
809 * Item specification.
810 * @param enic_filter[out]
811 * Partially filled in NIC filter structure.
812 * @param inner_ofst[in]
813 * Must be 0. Don't support inner SCTP filtering.
816 enic_copy_item_sctp_v2(const struct rte_flow_item *item,
817 struct filter_v2 *enic_filter, u8 *inner_ofst)
819 const struct rte_flow_item_sctp *spec = item->spec;
820 const struct rte_flow_item_sctp *mask = item->mask;
821 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
828 /* Match all if no spec */
833 mask = &rte_flow_item_sctp_mask;
// SCTP has no FILTER_GENERIC_1_* protocol flag (visible here), so only the
// byte-wise L4 header match is programmed.
835 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
836 sizeof(struct sctp_hdr));
837 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
838 sizeof(struct sctp_hdr));
843 * Copy VXLAN item into version 2 NIC filter.
846 * Item specification.
847 * @param enic_filter[out]
848 * Partially filled in NIC filter structure.
849 * @param inner_ofst[in]
850 * Must be 0. VxLAN headers always start at the beginning of L5.
853 enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
854 struct filter_v2 *enic_filter, u8 *inner_ofst)
856 const struct rte_flow_item_vxlan *spec = item->spec;
857 const struct rte_flow_item_vxlan *mask = item->mask;
858 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
865 /* Match all if no spec */
870 mask = &rte_flow_item_vxlan_mask;
872 memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
873 sizeof(struct vxlan_hdr));
874 memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
875 sizeof(struct vxlan_hdr));
// Subsequent (inner) items will be written into L5 after the VXLAN header.
877 *inner_ofst = sizeof(struct vxlan_hdr);
882 * Return 1 if current item is valid on top of the previous one.
884 * @param prev_item[in]
885 * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
887 * @param item_info[in]
888 * Info about this item, like valid previous items.
889 * @param is_first[in]
890 * True if this the first item in the pattern.
893 item_stacking_valid(enum rte_flow_item_type prev_item,
894 const struct enic_items *item_info, u8 is_first_item)
896 enum rte_flow_item_type const *allowed_items = item_info->prev_items;
// Accept if the previous item is on this item's allow-list...
900 for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
901 if (prev_item == *allowed_items)
905 /* This is the first item in the stack. Check if that's cool */
// ...or if it is the first item and this item may start a pattern.
906 if (is_first_item && item_info->valid_start_item)
913 * Build the internal enic filter structure from the provided pattern. The
914 * pattern is validated as the items are copied.
917 * @param items_info[in]
918 * Info about this NICs item support, like valid previous items.
919 * @param enic_filter[out]
920 * NIC specific filters derived from the pattern.
924 enic_copy_filter(const struct rte_flow_item pattern[],
925 const struct enic_items *items_info,
926 struct filter_v2 *enic_filter,
927 struct rte_flow_error *error)
930 const struct rte_flow_item *item = pattern;
931 u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
932 enum rte_flow_item_type prev_item;
933 const struct enic_items *item_info;
935 u8 is_first_item = 1;
// NOTE(review): the statement initializing prev_item is not visible in
// this chunk; confirm it is set (e.g. to 0/END) before the loop below.
941 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
942 /* Get info about how to validate and copy the item. If NULL
943 * is returned the nic does not support the item.
945 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
948 item_info = &items_info[item->type];
950 /* check to see if item stacking is valid */
951 if (!item_stacking_valid(prev_item, item_info, is_first_item))
954 ret = item_info->copy_item(item, enic_filter, &inner_ofst);
956 goto item_not_supported;
957 prev_item = item->type;
// Two error exits: copy failure -> "enic type error" (errno from handler),
// bad ordering -> "stacking error" (EINVAL, with the offending item).
963 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
964 NULL, "enic type error");
968 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
969 item, "stacking error");
974 * Build the internal version 1 NIC action structure from the provided pattern.
975 * The pattern is validated as the items are copied.
978 * @param enic_action[out]
979 * NIC specific actions derived from the actions.
983 enic_copy_action_v1(const struct rte_flow_action actions[],
984 struct filter_action_v2 *enic_action)
987 uint32_t overlap = 0;
991 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
992 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
995 switch (actions->type) {
996 case RTE_FLOW_ACTION_TYPE_QUEUE: {
997 const struct rte_flow_action_queue *queue =
998 (const struct rte_flow_action_queue *)
// Translate the app-visible queue index to the NIC's start-of-packet RQ.
1004 enic_action->rq_idx =
1005 enic_rte_rq_idx_to_sop_idx(queue->index);
// Exactly one fate-deciding action (QUEUE) must have been seen.
1013 if (!(overlap & FATE))
1015 enic_action->type = FILTER_ACTION_RQ_STEERING;
1020 * Build the internal version 2 NIC action structure from the provided pattern.
1021 * The pattern is validated as the items are copied.
1023 * @param actions[in]
1024 * @param enic_action[out]
1025 * NIC specific actions derived from the actions.
1029 enic_copy_action_v2(const struct rte_flow_action actions[],
1030 struct filter_action_v2 *enic_action)
// `overlap` bits guard against duplicate fate (QUEUE/DROP) and duplicate
// mark (MARK/FLAG) actions in one rule.
1032 enum { FATE = 1, MARK = 2, };
1033 uint32_t overlap = 0;
1037 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1038 switch (actions->type) {
1039 case RTE_FLOW_ACTION_TYPE_QUEUE: {
1040 const struct rte_flow_action_queue *queue =
1041 (const struct rte_flow_action_queue *)
1047 enic_action->rq_idx =
1048 enic_rte_rq_idx_to_sop_idx(queue->index);
1049 enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
1052 case RTE_FLOW_ACTION_TYPE_MARK: {
1053 const struct rte_flow_action_mark *mark =
1054 (const struct rte_flow_action_mark *)
1060 /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
1061 * in the range of allowed mark ids.
1063 if (mark->id >= ENIC_MAGIC_FILTER_ID)
1065 enic_action->filter_id = mark->id;
1066 enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1069 case RTE_FLOW_ACTION_TYPE_FLAG: {
// FLAG is implemented as MARK with the reserved magic id.
1073 enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
1074 enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1077 case RTE_FLOW_ACTION_TYPE_DROP: {
1081 enic_action->flags |= FILTER_ACTION_DROP_FLAG;
1084 case RTE_FLOW_ACTION_TYPE_COUNT: {
1085 enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
1088 case RTE_FLOW_ACTION_TYPE_VOID:
1095 if (!(overlap & FATE))
1097 enic_action->type = FILTER_ACTION_V2;
1101 /** Check if the action is supported */
// Linear scan of the END-terminated capability list for this NIC.
1103 enic_match_action(const struct rte_flow_action *action,
1104 const enum rte_flow_action_type *supported_actions)
1106 for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
1107 supported_actions++) {
1108 if (action->type == *supported_actions)
1114 /** Get the NIC filter capabilities structure */
1115 static const struct enic_filter_cap *
1116 enic_get_filter_cap(struct enic *enic)
// flow_filter_mode indexes directly into enic_filter_cap[]; a zero mode
// falls through (return for that case is not visible in this chunk).
1118 if (enic->flow_filter_mode)
1119 return &enic_filter_cap[enic->flow_filter_mode];
1124 /** Get the actions for this NIC version. */
1125 static const struct enic_action_cap *
1126 enic_get_action_cap(struct enic *enic)
1128 const struct enic_action_cap *ea;
1131 actions = enic->filter_actions;
// Pick the richest capability set the firmware advertises, in decreasing
// order: COUNT > DROP > FILTER_ID (mark) > plain RQ steering.
1132 if (actions & FILTER_ACTION_COUNTER_FLAG)
1133 ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
1134 else if (actions & FILTER_ACTION_DROP_FLAG)
1135 ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
1136 else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
1137 ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
1139 ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
1143 /* Debug function to dump internal NIC action structure. */
1145 enic_dump_actions(const struct filter_action_v2 *ea)
1147 if (ea->type == FILTER_ACTION_RQ_STEERING) {
1148 FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
1149 } else if (ea->type == FILTER_ACTION_V2) {
1150 FLOW_LOG(INFO, "Actions(V2)\n");
1151 if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
// Convert back from SOP RQ index to the app-visible queue index for logging.
1152 FLOW_LOG(INFO, "\tqueue: %u\n",
1153 enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
1154 if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
1155 FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
1159 /* Debug function to dump internal NIC filter structure. */
1161 enic_dump_filter(const struct filter_v2 *filt)
1163 const struct filter_generic_1 *gp;
1166 char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1167 char l4csum[16], ipfrag[16];
1169 switch (filt->type) {
1170 case FILTER_IPV4_5TUPLE:
1171 FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
1173 case FILTER_USNIC_IP:
1175 /* FIXME: this should be a loop */
1176 gp = &filt->u.generic_1;
1177 FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
1178 gp->val_vlan, gp->mask_vlan);
// For each flag: "(y)" if matched-on and set, "(n)" if matched-on and
// clear, "(x)" if not matched on at all.
1180 if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
1182 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1183 ? "ip4(y)" : "ip4(n)");
1185 sprintf(ip4, "%s ", "ip4(x)");
1187 if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
// Fix: test the IPv6 value flag here; this previously tested the IPv4
// bit (copy-paste error), so "ip6(y)/ip6(n)" was reported incorrectly.
1189 (gp->val_flags & FILTER_GENERIC_1_IPV6)
1190 ? "ip6(y)" : "ip6(n)");
1192 sprintf(ip6, "%s ", "ip6(x)");
1194 if (gp->mask_flags & FILTER_GENERIC_1_UDP)
1196 (gp->val_flags & FILTER_GENERIC_1_UDP)
1197 ? "udp(y)" : "udp(n)");
1199 sprintf(udp, "%s ", "udp(x)");
1201 if (gp->mask_flags & FILTER_GENERIC_1_TCP)
1203 (gp->val_flags & FILTER_GENERIC_1_TCP)
1204 ? "tcp(y)" : "tcp(n)");
1206 sprintf(tcp, "%s ", "tcp(x)");
1208 if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1209 sprintf(tcpudp, "%s ",
1210 (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1211 ? "tcpudp(y)" : "tcpudp(n)");
1213 sprintf(tcpudp, "%s ", "tcpudp(x)");
1215 if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
1216 sprintf(ip4csum, "%s ",
1217 (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
1218 ? "ip4csum(y)" : "ip4csum(n)");
1220 sprintf(ip4csum, "%s ", "ip4csum(x)");
1222 if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
1223 sprintf(l4csum, "%s ",
1224 (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
1225 ? "l4csum(y)" : "l4csum(n)");
1227 sprintf(l4csum, "%s ", "l4csum(x)");
1229 if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
1230 sprintf(ipfrag, "%s ",
1231 (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
1232 ? "ipfrag(y)" : "ipfrag(n)");
1234 sprintf(ipfrag, "%s ", "ipfrag(x)");
1235 FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
1236 tcp, tcpudp, ip4csum, l4csum, ipfrag);
// Dump each layer's mask/val up to the last non-zero mask byte.
1238 for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
1239 mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
1240 while (mbyte && !gp->layer[i].mask[mbyte])
1246 for (j = 0; j <= mbyte; j++) {
1248 gp->layer[i].mask[j]);
1252 FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
1254 for (j = 0; j <= mbyte; j++) {
1256 gp->layer[i].val[j]);
1260 FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
1264 FLOW_LOG(INFO, "FILTER UNKNOWN\n");
1269 /* Debug function to dump internal NIC flow structures. */
// Dumps the filter (match side) first, then the actions.
1271 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1273 enic_dump_filter(filt);
1274 enic_dump_actions(ea);
1279 * Internal flow parse/validate function.
1282 * This device pointer.
1283 * @param pattern[in]
1284 * @param actions[in]
1286 * @param enic_filter[out]
1287 * Internal NIC filter structure pointer.
1288 * @param enic_action[out]
1289 * Internal NIC action structure pointer.
1292 enic_flow_parse(struct rte_eth_dev *dev,
1293 		const struct rte_flow_attr *attrs,
1294 		const struct rte_flow_item pattern[],
1295 		const struct rte_flow_action actions[],
1296 		struct rte_flow_error *error,
1297 		struct filter_v2 *enic_filter,
1298 		struct filter_action_v2 *enic_action)
1300 	unsigned int ret = 0;
1301 	struct enic *enic = pmd_priv(dev);
1302 	const struct enic_filter_cap *enic_filter_cap;
1303 	const struct enic_action_cap *enic_action_cap;
1304 	const struct rte_flow_action *action;
	/* Start from clean output structures; both are filled in below. */
1308 	memset(enic_filter, 0, sizeof(*enic_filter));
1309 	memset(enic_action, 0, sizeof(*enic_action));
	/* A flow with no pattern is rejected outright. */
1312 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1313 				   NULL, "No pattern specified");
	/* Likewise a flow with no actions. */
1318 		rte_flow_error_set(error, EINVAL,
1319 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1320 				   NULL, "No action specified");
	/*
	 * Attribute checks: this NIC only supports ingress flows in group 0
	 * with priority 0; egress and transfer attributes are rejected.
	 */
1326 		rte_flow_error_set(error, ENOTSUP,
1327 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1329 				   "priority groups are not supported");
1331 	} else if (attrs->priority) {
1332 		rte_flow_error_set(error, ENOTSUP,
1333 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1335 				   "priorities are not supported");
1337 	} else if (attrs->egress) {
1338 		rte_flow_error_set(error, ENOTSUP,
1339 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1341 				   "egress is not supported");
1343 	} else if (attrs->transfer) {
1344 		rte_flow_error_set(error, ENOTSUP,
1345 				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1347 				   "transfer is not supported");
1349 	} else if (!attrs->ingress) {
1350 		rte_flow_error_set(error, ENOTSUP,
1351 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1353 				   "only ingress is supported");
	/* A NULL attrs pointer is also an error. */
1358 		rte_flow_error_set(error, EINVAL,
1359 				   RTE_FLOW_ERROR_TYPE_ATTR,
1360 				   NULL, "No attribute specified");
1364 	/* Verify Actions. */
	/*
	 * First pass: scan the action list and stop at the first action this
	 * NIC's capability table does not support (VOIDs are skipped).
	 */
1365 	enic_action_cap =  enic_get_action_cap(enic);
1366 	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1368 		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1370 		else if (!enic_match_action(action, enic_action_cap->actions))
	/* Loop stopped early => an unsupported action was found. */
1373 	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1374 		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1375 				   action, "Invalid action.");
	/* Second pass: translate the actions into the internal NIC format. */
1378 	ret = enic_action_cap->copy_fn(actions, enic_action);
1380 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1381 				   NULL, "Unsupported action.");
1385 	/* Verify Flow items. If copying the filter from flow format to enic
1386 	 * format fails, the flow is not supported
	 */
1388 	enic_filter_cap =  enic_get_filter_cap(enic);
1389 	if (enic_filter_cap == NULL) {
1390 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1391 				   NULL, "Flow API not available");
	/* Use the filter mode negotiated with the firmware at init time. */
1394 	enic_filter->type = enic->flow_filter_mode;
1395 	ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
1396 				       enic_filter, error);
1401 * Push filter/action to the NIC.
1404 * Device structure pointer.
1405 * @param enic_filter[in]
1406 * Internal NIC filter structure pointer.
1407 * @param enic_action[in]
1408 * Internal NIC action structure pointer.
1411 static struct rte_flow *
1412 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1413 		   struct filter_action_v2 *enic_action,
1414 		   struct rte_flow_error *error)
1416 	struct rte_flow *flow;
1420 	int last_max_flow_ctr;
	/* Allocate the driver-side flow handle. */
1424 	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1426 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1427 				   NULL, "cannot allocate flow memory");
	/* -1 means "no counter attached" / "DMA size unchanged". */
1431 	flow->counter_idx = -1;
1432 	last_max_flow_ctr = -1;
	/* A COUNT action requires a hardware flow counter. */
1433 	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
1434 		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
1435 			rte_flow_error_set(error, ENOMEM,
1436 					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1437 					   NULL, "cannot allocate counter");
1438 			goto unwind_flow_alloc;
1440 		flow->counter_idx = ctr_idx;
1441 		enic_action->counter_index = ctr_idx;
1443 		/* If index is the largest, increase the counter DMA size */
1444 		if (ctr_idx > enic->max_flow_counter) {
1445 			err = vnic_dev_counter_dma_cfg(enic->vdev,
1446 						 VNIC_FLOW_COUNTER_UPDATE_MSECS,
1449 				rte_flow_error_set(error, -err,
1450 					    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1451 					    NULL, "counter DMA config failed");
1452 				goto unwind_ctr_alloc;
			/* Remember old max so it can be restored on failure. */
1454 			last_max_flow_ctr = enic->max_flow_counter;
1455 			enic->max_flow_counter = ctr_idx;
1459 	/* entry[in] is the queue id, entry[out] is the filter Id for delete */
1460 	entry = enic_action->rq_idx;
1461 	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1464 		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1465 				   NULL, "vnic_dev_classifier error");
1466 		goto unwind_ctr_dma_cfg;
	/* Save what is needed to delete the filter later, and a copy of it. */
1469 	flow->enic_filter_id = entry;
1470 	flow->enic_filter = *enic_filter;
1474 	/* unwind if there are errors */
	/* Unwind in reverse order of acquisition: DMA size, counter, flow. */
1476 	if (last_max_flow_ctr != -1) {
1477 		/* reduce counter DMA size */
1478 		vnic_dev_counter_dma_cfg(enic->vdev,
1479 					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
1480 					 last_max_flow_ctr + 1);
1481 		enic->max_flow_counter = last_max_flow_ctr;
1484 	if (flow->counter_idx != -1)
1485 		vnic_dev_counter_free(enic->vdev, ctr_idx);
1492 * Remove filter/action from the NIC.
1495 * Device structure pointer.
1496  * @param flow[in]
 *   Flow whose NIC filter (and counter, if attached) is to be removed.
1498 * @param enic_action[in]
1499 * Internal NIC action structure pointer.
1503 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1504 		   struct rte_flow_error *error)
	/* Remove the classifier entry saved when the flow was added. */
1511 	filter_id = flow->enic_filter_id;
1512 	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1514 		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1515 				   NULL, "vnic_dev_classifier failed");
	/* Release the flow counter, if one was attached (-1 means none). */
1519 	if (flow->counter_idx != -1) {
		/* Counter free failure is logged but not fatal to the delete. */
1520 		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
1521 			dev_err(enic, "counter free failed, idx: %d\n",
1523 		flow->counter_idx = -1;
1529 * The following functions are callbacks for Generic flow API.
1533 * Validate a flow supported by the NIC.
1535 * @see rte_flow_validate()
1539 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1540 		   const struct rte_flow_item pattern[],
1541 		   const struct rte_flow_action actions[],
1542 		   struct rte_flow_error *error)
	/* Parse into throwaway locals; validate never programs the NIC. */
1544 	struct filter_v2 enic_filter;
1545 	struct filter_action_v2 enic_action;
1550 	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1551 			       &enic_filter, &enic_action);
	/* On success, dump the parsed filter/action for debugging. */
1553 		enic_dump_flow(&enic_action, &enic_filter);
1558 * Create a flow supported by the NIC.
1560 * @see rte_flow_create()
1563 static struct rte_flow *
1564 enic_flow_create(struct rte_eth_dev *dev,
1565 		 const struct rte_flow_attr *attrs,
1566 		 const struct rte_flow_item pattern[],
1567 		 const struct rte_flow_action actions[],
1568 		 struct rte_flow_error *error)
1571 	struct filter_v2 enic_filter;
1572 	struct filter_action_v2 enic_action;
1573 	struct rte_flow *flow;
1574 	struct enic *enic = pmd_priv(dev);
	/* Translate the rte_flow spec into internal filter/action form. */
1578 	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
	/* Program the NIC and track the flow under the per-device lock. */
1583 	rte_spinlock_lock(&enic->flows_lock);
1584 	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1587 		LIST_INSERT_HEAD(&enic->flows, flow, next);
1588 	rte_spinlock_unlock(&enic->flows_lock);
1594 * Destroy a flow supported by the NIC.
1596 * @see rte_flow_destroy()
1600 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1601 		  __rte_unused struct rte_flow_error *error)
1603 	struct enic *enic = pmd_priv(dev);
	/* Remove from the NIC and unlink from the per-device flow list
	 * under the flows lock. */
1607 	rte_spinlock_lock(&enic->flows_lock);
1608 	enic_flow_del_filter(enic, flow, error);
1609 	LIST_REMOVE(flow, next);
1610 	rte_spinlock_unlock(&enic->flows_lock);
1616 * Flush all flows on the device.
1618 * @see rte_flow_flush()
1622 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1624 	struct rte_flow *flow;
1625 	struct enic *enic = pmd_priv(dev);
1629 	rte_spinlock_lock(&enic->flows_lock);
	/* Drain the flow list, deleting each NIC filter as we go. */
1631 	while (!LIST_EMPTY(&enic->flows)) {
1632 		flow = LIST_FIRST(&enic->flows);
1633 		enic_flow_del_filter(enic, flow, error);
1634 		LIST_REMOVE(flow, next);
1637 	rte_spinlock_unlock(&enic->flows_lock);
1642 enic_flow_query_count(struct rte_eth_dev *dev,
1643 		      struct rte_flow *flow, void *data,
1644 		      struct rte_flow_error *error)
1646 	struct enic *enic = pmd_priv(dev);
1647 	struct rte_flow_query_count *query;
1648 	uint64_t packets, bytes;
	/* Only flows created with a COUNT action have a counter (-1 = none). */
1652 	if (flow->counter_idx == -1) {
1653 		return rte_flow_error_set(error, ENOTSUP,
1654 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1656 					  "flow does not have counter");
	/* data is the rte_flow_query_count result buffer from the caller. */
1658 	query = (struct rte_flow_query_count *)data;
	/* query->reset also clears the hardware counter when set. */
1659 	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
1660 		!!query->reset, &packets, &bytes)) {
1661 		return rte_flow_error_set
1663 			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1665 			 "cannot read counter");
	/* Report both hit and byte counts as valid. */
1667 	query->hits_set = 1;
1668 	query->bytes_set = 1;
1669 	query->hits = packets;
1670 	query->bytes = bytes;
1675 enic_flow_query(struct rte_eth_dev *dev,
1676 		struct rte_flow *flow,
1677 		const struct rte_flow_action *actions,
1679 		struct rte_flow_error *error)
	/* Walk the query action list; only COUNT is supported (VOID skipped). */
1685 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1686 		switch (actions->type) {
1687 		case RTE_FLOW_ACTION_TYPE_VOID:
1689 		case RTE_FLOW_ACTION_TYPE_COUNT:
1690 			ret = enic_flow_query_count(dev, flow, data, error);
		/* Any other action type is rejected. */
1693 			return rte_flow_error_set(error, ENOTSUP,
1694 					RTE_FLOW_ERROR_TYPE_ACTION,
1696 					"action not supported");
1705 * Flow callback registration.
1709 const struct rte_flow_ops enic_flow_ops = {
1710 .validate = enic_flow_validate,
1711 .create = enic_flow_create,
1712 .destroy = enic_flow_destroy,
1713 .flush = enic_flow_flush,
1714 .query = enic_flow_query,