1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
8 #include <rte_ethdev_driver.h>
9 #include <rte_flow_driver.h>
10 #include <rte_ether.h>
14 #include "enic_compat.h"
/*
 * Logging helpers for the flow module. Both emit through the
 * enicpmd_logtype_flow dynamic log type; FLOW_TRACE() logs at DEBUG,
 * FLOW_LOG() at the given RTE_LOG_ level.
 */
#define FLOW_TRACE() \
	rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
#define FLOW_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
/** Info about how to copy items into enic filters. */
	/** Function for copying and validating an item. */
	int (*copy_item)(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst);
	/** List of valid previous items (END-terminated). */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const u8 valid_start_item;
/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** List of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
	/* Max type in the above list, used to detect unsupported types */
	enum rte_flow_item_type max_item_type;
/* Function type for copying rte_flow actions into enic actions. */
typedef int (copy_action_fn)(struct enic *enic,
		const struct rte_flow_action actions[],
		struct filter_action_v2 *enic_action);

/* Function type for copying rte_flow items into enic filters. */
typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst);
/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** List of valid actions (END-terminated). */
	const enum rte_flow_action_type *actions;
	/** Copy function for a particular NIC. */
	copy_action_fn *copy_fn;
/* Forward declarations.
 * The _v1 handlers serve the legacy 5-tuple filter API; the _v2 handlers
 * serve the generic filter API (see the capability tables below).
 */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;
/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * filters are available: IPv4 plus UDP or TCP ports.
 */
static const struct enic_items enic_items_v1[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v1,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
/**
 * NICs have Advanced Filters capability but they are disabled. This means
 * that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
	/* Note: L4 items cannot start a pattern here (valid_start_item = 0),
	 * consistent with the "layer 3 must be specified" rule above.
	 */
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
/** NICs with Advanced filters enabled. Unlike the v2 table, most items
 * (including UDP/TCP and VXLAN) may start the pattern.
 */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
	/* SCTP still may not start a pattern even with advanced filters. */
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
/** Filtering capabilities indexed by this NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_IPV4_5TUPLE] = {
		.item_info = enic_items_v1,
		.max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	/* NOTE(review): the array designator for this entry is not visible
	 * in this view — presumably the advanced-filter type; confirm.
	 */
		.item_info = enic_items_v3,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,

/** As above, plus the DROP action. */
static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,

/** As above, plus the COUNT action. */
static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	[FILTER_ACTION_FILTER_ID_FLAG] = {
		.actions = enic_supported_actions_v2_id,
		.copy_fn = enic_copy_action_v2,
	[FILTER_ACTION_DROP_FLAG] = {
		.actions = enic_supported_actions_v2_drop,
		.copy_fn = enic_copy_action_v2,
	[FILTER_ACTION_COUNTER_FLAG] = {
		.actions = enic_supported_actions_v2_count,
		.copy_fn = enic_copy_action_v2,
/* Byte-wise exact-match compare of 'supplied' against 'supported';
 * bails out on the first mismatching byte.
 */
mask_exact_match(const u8 *supported, const u8 *supplied,
	for (i = 0; i < size; i++) {
		if (supported[i] != supplied[i])
/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	/* The only mask this filter version supports: full src/dst match. */
	struct ipv4_hdr supported_mask = {
		.src_addr = 0xffffffff,
		.dst_addr = 0xffffffff,

		/* Default mask — presumably under an if (!mask) guard not
		 * visible in this view; confirm.
		 */
		mask = &rte_flow_item_ipv4_mask;

	/* This is an exact match filter, both fields must be set */
	if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
		FLOW_LOG(ERR, "IPv4 exact match src/dst addr");

	/* Check that the supplied mask exactly matches capability.
	 * NOTE(review): this passes item->mask rather than the local 'mask'
	 * that may have been defaulted above — if item->mask is NULL this
	 * looks like a NULL dereference inside mask_exact_match; confirm.
	 */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "IPv4 exact match mask");

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_addr = spec->hdr.src_addr;
	enic_5tup->dst_addr = spec->hdr.dst_addr;
/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
enic_copy_item_udp_v1(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	/* Supported mask: full src/dst port match (fields not visible in
	 * this view).
	 */
	struct udp_hdr supported_mask = {

		/* Default mask — presumably under an if (!mask) guard. */
		mask = &rte_flow_item_udp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		/* Log message says "addr" although ports are checked. */
		FLOW_LOG(ERR, "UDP exact match src/dst addr");

	/* Check that the supplied mask exactly matches capability.
	 * NOTE(review): passes item->mask, not the defaulted 'mask' — NULL
	 * item->mask would be dereferenced in mask_exact_match; confirm.
	 */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "UDP exact match mask");

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_UDP;
/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	/* Supported mask: full src/dst port match (fields not visible in
	 * this view).
	 */
	struct tcp_hdr supported_mask = {

		/* Default mask — presumably under an if (!mask) guard. */
		mask = &rte_flow_item_tcp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr");

	/* Check that the supplied mask exactly matches capability.
	 * NOTE(review): passes item->mask, not the defaulted 'mask' — NULL
	 * item->mask would be dereferenced in mask_exact_match; confirm.
	 */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "TCP exact match mask");

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_TCP;
/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
enic_copy_item_eth_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match all if no spec */

		mask = &rte_flow_item_eth_mask;

	/* Repack the rte_flow eth spec/mask into the NIC's ether_hdr
	 * layout before copying into the filter.
	 */
	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,

	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;

	if (*inner_ofst == 0) {
		/* Outer header: goes into the filter's L2 layer. */
		memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
		       sizeof(struct ether_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
		       sizeof(struct ether_hdr));
		/* Inner header: bounds-check against the filter key size. */
		if ((*inner_ofst + sizeof(struct ether_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
		/* Offset into L5 where inner Ethernet header goes */
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       &enic_mask, sizeof(struct ether_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       &enic_spec, sizeof(struct ether_hdr));
		*inner_ofst += sizeof(struct ether_hdr);
/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match all if no spec */

		mask = &rte_flow_item_vlan_mask;

	if (*inner_ofst == 0) {
		/* Outer VLAN: the inner ethertype replaces the one written
		 * by the preceding ETH item in the L2 layer.
		 */
		struct ether_hdr *eth_mask =
			(void *)gp->layer[FILTER_GENERIC_1_L2].mask;
		struct ether_hdr *eth_val =
			(void *)gp->layer[FILTER_GENERIC_1_L2].val;

		/* Outer TPID cannot be matched */
		if (eth_mask->ether_type)
		eth_mask->ether_type = mask->inner_type;
		eth_val->ether_type = spec->inner_type;

		/* Outer header. Use the vlan mask/val fields */
		gp->mask_vlan = mask->tci;
		gp->val_vlan = spec->tci;
		/* Inner header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct vlan_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct vlan_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct vlan_hdr));
		*inner_ofst += sizeof(struct vlan_hdr);
/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header; otherwise the offset into L5 where the
 *   inner header is copied (the "Must be 0" note appears outdated given the
 *   inner-header branch below — confirm).
 */
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	if (*inner_ofst == 0) {
		/* Tell the NIC this is an IPv4 packet. */
		gp->mask_flags |= FILTER_GENERIC_1_IPV4;
		gp->val_flags |= FILTER_GENERIC_1_IPV4;

		/* Match all if no spec */

			mask = &rte_flow_item_ipv4_mask;

		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv4_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv4_hdr));
		/* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv4_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv4_hdr));
		*inner_ofst += sizeof(struct ipv4_hdr);
/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header; otherwise the offset into L5 where the
 *   inner header is copied (the "Must be 0" note appears outdated given the
 *   inner-header branch below — confirm).
 */
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Tell the NIC this is an IPv6 packet. Unlike the IPv4 handler,
	 * these flags are set outside the *inner_ofst == 0 branch.
	 */
	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;

	/* Match all if no spec */

		mask = &rte_flow_item_ipv6_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv6_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv6_hdr));
		/* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv6_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv6_hdr));
		*inner_ofst += sizeof(struct ipv6_hdr);
/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header; otherwise the offset into L5 where the
 *   inner header is copied.
 */
enic_copy_item_udp_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Tell the NIC this is a UDP packet. */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;

	/* Match all if no spec */

		mask = &rte_flow_item_udp_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct udp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct udp_hdr));
		/* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct udp_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct udp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct udp_hdr));
		*inner_ofst += sizeof(struct udp_hdr);
/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header; otherwise the offset into L5 where the
 *   inner header is copied.
 */
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Tell the NIC this is a TCP packet. */
	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;

	/* Match all if no spec */

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct tcp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct tcp_hdr));
		/* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct tcp_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct tcp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct tcp_hdr));
		*inner_ofst += sizeof(struct tcp_hdr);
/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;

	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly set
	 * the protocol number in the IP pattern.
	 */
	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
		/* Point at the IPv4 protocol field in the L3 layer. */
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->next_proto_id;
	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
		/* Point at the IPv6 next-header field in the L3 layer. */
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->proto;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->proto;
		/* Need IPv4/IPv6 pattern first */

	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;

	/* Match all if no spec */

		mask = &rte_flow_item_sctp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
/**
 * Copy VXLAN item into version 2 NIC filter.
 * (Original header comment said "Copy UDP item" — copy/paste slip.)
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VxLAN headers always start at the beginning of L5.
 */
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
		struct filter_v2 *enic_filter, u8 *inner_ofst)
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match all if no spec */

		mask = &rte_flow_item_vxlan_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));

	/* Subsequent (inner) items are copied after the VXLAN header. */
	*inner_ofst = sizeof(struct vxlan_hdr);
/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
item_stacking_valid(enum rte_flow_item_type prev_item,
		const struct enic_items *item_info, u8 is_first_item)
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	/* Walk the END-terminated list of allowed predecessor items. */
	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)

	/* This is the first item in the stack. Check if that's cool */
	if (is_first_item && item_info->valid_start_item)
/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filters derived from the pattern.
 * @param error[out]
 */
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_filter_cap *cap,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;
	u8 is_first_item = 1;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If NULL
		 * is returned the nic does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)

		item_info = &cap->item_info[item->type];
		if (item->type > cap->max_item_type ||
		    item_info->copy_item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Unsupported item.");

		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))

		ret = item_info->copy_item(item, enic_filter, &inner_ofst);
			goto item_not_supported;
		prev_item = item->type;

	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");

	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
/**
 * Build the internal version 1 NIC action structure from the provided pattern.
 * The pattern is validated as the items are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 */
enic_copy_action_v1(__rte_unused struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
	uint32_t overlap = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
			/* Translate the rte_flow queue index to the NIC's
			 * SOP RQ index.
			 */
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);

	/* Exactly one fate-deciding action (QUEUE) is required. */
	if (!(overlap & FATE))
	enic_action->type = FILTER_ACTION_RQ_STEERING;
/**
 * Build the internal version 2 NIC action structure from the provided pattern.
 * The pattern is validated as the items are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 */
enic_copy_action_v2(struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
	/* Bit flags used to reject duplicate fate/mark actions. */
	enum { FATE = 1, MARK = 2, };
	uint32_t overlap = 0;
	bool passthru = false;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
			/*
			 * Map mark ID (32-bit) to filter ID (16-bit):
			 * - Reject values > 16 bits
			 * - Filter ID 0 is reserved for filters that steer
			 *   but not mark. So add 1 to the mark ID to avoid
			 *   it.
			 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
			 *   reserved for the "flag" action below.
			 */
			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
			enic_action->filter_id = mark->id + 1;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
		case RTE_FLOW_ACTION_TYPE_DROP: {
			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
			/*
			 * Hardware does not support general RSS actions, but
			 * we can still support the dummy one that is used to
			 * "receive normally". The action must describe the
			 * device's current RSS config exactly.
			 */
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->types == enic->rss_hf) &&
				rss->queue_num == enic->rq_count &&
			/* Identity queue map is ok */
			for (i = 0; i < rss->queue_num; i++)
				allow = allow && (i == rss->queue[i]);
			/* Need MARK or FLAG */
			if (!(overlap & MARK))
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			/*
			 * Like RSS above, PASSTHRU + MARK may be used to
			 * "mark and then receive normally". MARK usually comes
			 * after PASSTHRU, so remember we have seen passthru
			 * and check for mark later.
			 */
		case RTE_FLOW_ACTION_TYPE_VOID:

	/* Only PASSTHRU + MARK is allowed */
	if (passthru && !(overlap & MARK))
	if (!(overlap & FATE))
	enic_action->type = FILTER_ACTION_V2;
/** Check if the action is in the END-terminated supported-actions list. */
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
/** Get the NIC filter capabilities structure for this device's
 * flow_filter_mode, or (presumably) NULL when no mode is set — the
 * fallthrough return is not visible here.
 */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
	if (enic->flow_filter_mode)
		return &enic_filter_cap[enic->flow_filter_mode];
/** Get the actions for this NIC version. Picks the richest capability set
 * the firmware advertises: COUNT > DROP > FILTER_ID > RQ steering.
 */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
	const struct enic_action_cap *ea;

	actions = enic->filter_actions;
	if (actions & FILTER_ACTION_COUNTER_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
	else if (actions & FILTER_ACTION_DROP_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
/* Debug function to dump internal NIC action structure. */
enic_dump_actions(const struct filter_action_v2 *ea)
	if (ea->type == FILTER_ACTION_RQ_STEERING) {
		FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
	} else if (ea->type == FILTER_ACTION_V2) {
		FLOW_LOG(INFO, "Actions(V2)\n");
		if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
			FLOW_LOG(INFO, "\tqueue: %u\n",
				 enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
		if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
			FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
/* Debug function to dump internal NIC filter structure. Each flag is
 * rendered as name(y)/name(n) when masked, name(x) when not masked.
 */
enic_dump_filter(const struct filter_v2 *filt)
	const struct filter_generic_1 *gp;

	char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
	char l4csum[16], ipfrag[16];

	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
	case FILTER_USNIC_IP:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
			 gp->val_vlan, gp->mask_vlan);

		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
			(gp->val_flags & FILTER_GENERIC_1_IPV4)
			 ? "ip4(y)" : "ip4(n)");
			sprintf(ip4, "%s ", "ip4(x)");

		/* NOTE(review): the IPv6 branch below tests val_flags against
		 * FILTER_GENERIC_1_IPV4 — looks like a copy/paste bug; should
		 * presumably be FILTER_GENERIC_1_IPV6. Confirm.
		 */
		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
			(gp->val_flags & FILTER_GENERIC_1_IPV4)
			 ? "ip6(y)" : "ip6(n)");
			sprintf(ip6, "%s ", "ip6(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
			(gp->val_flags & FILTER_GENERIC_1_UDP)
			 ? "udp(y)" : "udp(n)");
			sprintf(udp, "%s ", "udp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
			(gp->val_flags & FILTER_GENERIC_1_TCP)
			 ? "tcp(y)" : "tcp(n)");
			sprintf(tcp, "%s ", "tcp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				 ? "tcpudp(y)" : "tcpudp(n)");
			sprintf(tcpudp, "%s ", "tcpudp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				 ? "ip4csum(y)" : "ip4csum(n)");
			sprintf(ip4csum, "%s ", "ip4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				 ? "l4csum(y)" : "l4csum(n)");
			sprintf(l4csum, "%s ", "l4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				 ? "ipfrag(y)" : "ipfrag(n)");
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);

		/* Hex-dump each layer's mask/val up to the last non-zero
		 * mask byte.
		 */
		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
			for (j = 0; j <= mbyte; j++) {
				gp->layer[i].mask[j]);
			FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			for (j = 0; j <= mbyte; j++) {
				gp->layer[i].val[j]);
			FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
		FLOW_LOG(INFO, "FILTER UNKNOWN\n");
1368 /* Debug function to dump internal NIC flow structures. */
/* Convenience wrapper: dump the filter followed by its actions. */
1370 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1372 enic_dump_filter(filt);
1373 enic_dump_actions(ea);
1378 * Internal flow parse/validate function.
/*
 * Converts an rte_flow (attrs + pattern + actions) into the internal
 * NIC filter/action representation, validating each part and reporting
 * failures through rte_flow_error_set().  Validation order: attributes,
 * then actions (against this NIC's action capabilities), then pattern
 * items (against this NIC's filter capabilities).
 */
1381 * This device pointer.
1382 * @param pattern[in]
1383 * @param actions[in]
1385 * @param enic_filter[out]
1386 * Internal NIC filter structure pointer.
1387 * @param enic_action[out]
1388 * Internal NIC action structure pointer.
1391 enic_flow_parse(struct rte_eth_dev *dev,
1392 const struct rte_flow_attr *attrs,
1393 const struct rte_flow_item pattern[],
1394 const struct rte_flow_action actions[],
1395 struct rte_flow_error *error,
1396 struct filter_v2 *enic_filter,
1397 struct filter_action_v2 *enic_action)
1399 unsigned int ret = 0;
1400 struct enic *enic = pmd_priv(dev);
1401 const struct enic_filter_cap *enic_filter_cap;
1402 const struct enic_action_cap *enic_action_cap;
1403 const struct rte_flow_action *action;
/* Start from clean output structures so stale fields never leak through. */
1407 memset(enic_filter, 0, sizeof(*enic_filter));
1408 memset(enic_action, 0, sizeof(*enic_action));
1411 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1412 NULL, "No pattern specified");
1417 rte_flow_error_set(error, EINVAL,
1418 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1419 NULL, "No action specified");
/* Attribute checks: only plain ingress flows are supported. */
1425 rte_flow_error_set(error, ENOTSUP,
1426 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1428 "priority groups are not supported");
1430 } else if (attrs->priority) {
1431 rte_flow_error_set(error, ENOTSUP,
1432 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1434 "priorities are not supported");
1436 } else if (attrs->egress) {
1437 rte_flow_error_set(error, ENOTSUP,
1438 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1440 "egress is not supported");
1442 } else if (attrs->transfer) {
1443 rte_flow_error_set(error, ENOTSUP,
1444 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1446 "transfer is not supported");
1448 } else if (!attrs->ingress) {
1449 rte_flow_error_set(error, ENOTSUP,
1450 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1452 "only ingress is supported");
1457 rte_flow_error_set(error, EINVAL,
1458 RTE_FLOW_ERROR_TYPE_ATTR,
1459 NULL, "No attribute specified");
1463 /* Verify Actions. */
1464 enic_action_cap = enic_get_action_cap(enic);
/* Scan stops early at the first action this NIC cannot match. */
1465 for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1467 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1469 else if (!enic_match_action(action, enic_action_cap->actions))
1472 if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1473 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1474 action, "Invalid action.");
1477 ret = enic_action_cap->copy_fn(enic, actions, enic_action);
1479 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1480 NULL, "Unsupported action.");
1484 /* Verify Flow items. If copying the filter from flow format to enic
1485 * format fails, the flow is not supported
1487 enic_filter_cap = enic_get_filter_cap(enic);
1488 if (enic_filter_cap == NULL) {
1489 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1490 NULL, "Flow API not available");
1493 enic_filter->type = enic->flow_filter_mode;
1494 ret = enic_copy_filter(pattern, enic_filter_cap,
1495 enic_filter, error);
1500 * Push filter/action to the NIC.
/*
 * Allocates the rte_flow object, optionally allocates a flow counter
 * (growing the counter DMA region if this counter index is the largest
 * seen so far), then installs the classifier entry on the NIC.  On any
 * failure it unwinds in reverse order: restore counter DMA size, free
 * the counter, free the flow — and returns NULL with 'error' set.
 */
1503 * Device structure pointer.
1504 * @param enic_filter[in]
1505 * Internal NIC filter structure pointer.
1506 * @param enic_action[in]
1507 * Internal NIC action structure pointer.
1510 static struct rte_flow *
1511 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1512 struct filter_action_v2 *enic_action,
1513 struct rte_flow_error *error)
1515 struct rte_flow *flow;
/* Saved so the DMA size can be restored if a later step fails. */
1519 int last_max_flow_ctr;
1523 flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1525 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1526 NULL, "cannot allocate flow memory");
/* -1 means "no counter attached" throughout this file. */
1530 flow->counter_idx = -1;
1531 last_max_flow_ctr = -1;
1532 if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
1533 if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
1534 rte_flow_error_set(error, ENOMEM,
1535 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1536 NULL, "cannot allocate counter");
1537 goto unwind_flow_alloc;
1539 flow->counter_idx = ctr_idx;
1540 enic_action->counter_index = ctr_idx;
1542 /* If index is the largest, increase the counter DMA size */
1543 if (ctr_idx > enic->max_flow_counter) {
1544 err = vnic_dev_counter_dma_cfg(enic->vdev,
1545 VNIC_FLOW_COUNTER_UPDATE_MSECS,
1548 rte_flow_error_set(error, -err,
1549 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1550 NULL, "counter DMA config failed");
1551 goto unwind_ctr_alloc;
1553 last_max_flow_ctr = enic->max_flow_counter;
1554 enic->max_flow_counter = ctr_idx;
1558 /* entry[in] is the queue id, entry[out] is the filter Id for delete */
1559 entry = enic_action->rq_idx;
1560 err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1563 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1564 NULL, "vnic_dev_classifier error");
1565 goto unwind_ctr_dma_cfg;
/* Keep a copy of the filter so query/delete can reference it later. */
1568 flow->enic_filter_id = entry;
1569 flow->enic_filter = *enic_filter;
1573 /* unwind if there are errors */
1575 if (last_max_flow_ctr != -1) {
1576 /* reduce counter DMA size */
1577 vnic_dev_counter_dma_cfg(enic->vdev,
1578 VNIC_FLOW_COUNTER_UPDATE_MSECS,
1579 last_max_flow_ctr + 1);
1580 enic->max_flow_counter = last_max_flow_ctr;
1583 if (flow->counter_idx != -1)
1584 vnic_dev_counter_free(enic->vdev, ctr_idx);
1591 * Remove filter/action from the NIC.
/*
 * Deletes the classifier entry identified by flow->enic_filter_id and,
 * if the flow owns a counter, frees it and marks the flow counterless
 * (counter_idx = -1).  Errors are reported via rte_flow_error_set().
 */
1594 * Device structure pointer.
1595 * @param filter_id[in]
1597 * @param enic_action[in]
1598 * Internal NIC action structure pointer.
1602 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1603 struct rte_flow_error *error)
1610 filter_id = flow->enic_filter_id;
1611 err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1613 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1614 NULL, "vnic_dev_classifier failed");
1618 if (flow->counter_idx != -1) {
/* Counter free failure is logged but does not fail the delete. */
1619 if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
1620 dev_err(enic, "counter free failed, idx: %d\n",
1622 flow->counter_idx = -1;
1628 * The following functions are callbacks for Generic flow API.
1632 * Validate a flow supported by the NIC.
/*
 * rte_flow_ops.validate: parse the flow into internal structures
 * without programming the NIC; dump them to the debug log on success.
 */
1634 * @see rte_flow_validate()
1638 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1639 const struct rte_flow_item pattern[],
1640 const struct rte_flow_action actions[],
1641 struct rte_flow_error *error)
1643 struct filter_v2 enic_filter;
1644 struct filter_action_v2 enic_action;
1649 ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1650 &enic_filter, &enic_action);
1652 enic_dump_flow(&enic_action, &enic_filter);
1657 * Create a flow supported by the NIC.
/*
 * rte_flow_ops.create: parse, program the filter/action on the NIC
 * under flows_lock, and link the new flow into enic->flows.
 */
1659 * @see rte_flow_create()
1662 static struct rte_flow *
1663 enic_flow_create(struct rte_eth_dev *dev,
1664 const struct rte_flow_attr *attrs,
1665 const struct rte_flow_item pattern[],
1666 const struct rte_flow_action actions[],
1667 struct rte_flow_error *error)
1670 struct filter_v2 enic_filter;
1671 struct filter_action_v2 enic_action;
1672 struct rte_flow *flow;
1673 struct enic *enic = pmd_priv(dev);
1677 ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
/* flows_lock serializes NIC programming and list manipulation. */
1682 rte_spinlock_lock(&enic->flows_lock);
1683 flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1686 LIST_INSERT_HEAD(&enic->flows, flow, next);
1687 rte_spinlock_unlock(&enic->flows_lock);
1693 * Destroy a flow supported by the NIC.
/*
 * rte_flow_ops.destroy: remove the filter from the NIC and unlink the
 * flow from enic->flows, all under flows_lock.
 */
1695 * @see rte_flow_destroy()
1699 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1700 __rte_unused struct rte_flow_error *error)
1702 struct enic *enic = pmd_priv(dev);
1706 rte_spinlock_lock(&enic->flows_lock);
1707 enic_flow_del_filter(enic, flow, error);
1708 LIST_REMOVE(flow, next);
1709 rte_spinlock_unlock(&enic->flows_lock);
1715 * Flush all flows on the device.
/*
 * rte_flow_ops.flush: drain enic->flows under flows_lock, deleting each
 * flow's NIC filter before unlinking it from the list.
 */
1717 * @see rte_flow_flush()
1721 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1723 struct rte_flow *flow;
1724 struct enic *enic = pmd_priv(dev);
1728 rte_spinlock_lock(&enic->flows_lock);
1730 while (!LIST_EMPTY(&enic->flows)) {
1731 flow = LIST_FIRST(&enic->flows);
1732 enic_flow_del_filter(enic, flow, error);
1733 LIST_REMOVE(flow, next);
1736 rte_spinlock_unlock(&enic->flows_lock);
/*
 * Handle the COUNT query for a flow: read (and optionally reset) the
 * flow's hardware counter and fill in the rte_flow_query_count result.
 * Fails with ENOTSUP if the flow has no counter, and with an error if
 * the hardware counter read fails.
 */
1741 enic_flow_query_count(struct rte_eth_dev *dev,
1742 struct rte_flow *flow, void *data,
1743 struct rte_flow_error *error)
1745 struct enic *enic = pmd_priv(dev);
1746 struct rte_flow_query_count *query;
1747 uint64_t packets, bytes;
/* counter_idx == -1 means no counter was attached at create time. */
1751 if (flow->counter_idx == -1) {
1752 return rte_flow_error_set(error, ENOTSUP,
1753 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1755 "flow does not have counter");
1757 query = (struct rte_flow_query_count *)data;
1758 if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
1759 !!query->reset, &packets, &bytes)) {
1760 return rte_flow_error_set
1762 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1764 "cannot read counter");
/* Both hit and byte counts are always valid for enic counters. */
1766 query->hits_set = 1;
1767 query->bytes_set = 1;
1768 query->hits = packets;
1769 query->bytes = bytes;
/*
 * rte_flow_ops.query: dispatch each query action.  Only COUNT is
 * supported; VOID is skipped; anything else is rejected with ENOTSUP.
 */
1774 enic_flow_query(struct rte_eth_dev *dev,
1775 struct rte_flow *flow,
1776 const struct rte_flow_action *actions,
1778 struct rte_flow_error *error)
1784 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1785 switch (actions->type) {
1786 case RTE_FLOW_ACTION_TYPE_VOID:
1788 case RTE_FLOW_ACTION_TYPE_COUNT:
1789 ret = enic_flow_query_count(dev, flow, data, error);
1792 return rte_flow_error_set(error, ENOTSUP,
1793 RTE_FLOW_ERROR_TYPE_ACTION,
1795 "action not supported");
1804 * Flow callback registration.
1808 const struct rte_flow_ops enic_flow_ops = {
1809 .validate = enic_flow_validate,
1810 .create = enic_flow_create,
1811 .destroy = enic_flow_destroy,
1812 .flush = enic_flow_flush,
1813 .query = enic_flow_query,