/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>

#include "enic_compat.h"

#define FLOW_TRACE() \
	rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
		"%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
		fmt "\n", ##args)

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 */
struct copy_item_args {
	const struct rte_flow_item *item;
	struct filter_v2 *filter;
	uint8_t *inner_ofst;
};

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
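/*
 * Illustrative sketch (not part of the driver): a copy_item function
 * receives the item and the partially built NIC filter through
 * struct copy_item_args and returns 0 on success or a positive errno
 * value when the item cannot be supported. A hypothetical handler
 * would look roughly like this:
 *
 *	static int
 *	enic_copy_item_example(struct copy_item_args *arg)
 *	{
 *		const struct rte_flow_item *item = arg->item;
 *		struct filter_v2 *enic_filter = arg->filter;
 *
 *		// validate item->spec and item->mask, then copy them
 *		// into enic_filter; nonzero means "not supported"
 *		return 0;
 *	}
 */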
/** Info about how to copy items into enic filters. */
struct enic_items {
	/** Function for copying and validating an item. */
	enic_copy_item_fn *copy_item;
	/** List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const u8 valid_start_item;
};
/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** list of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
	/* Max type in the above list, used to detect unsupported types */
	enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
			     const struct rte_flow_action actions[],
			     struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** list of valid actions */
	const enum rte_flow_action_type *actions;
	/** copy function for a particular NIC */
	copy_action_fn *copy_fn;
};
/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;
/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v1,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
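/*
 * Example (illustrative, not compiled into the PMD): under enic_items_v1
 * a pattern must start at IPv4, so a valid 5-tuple pattern is
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip, .mask = &ip_m },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp, .mask = &udp_m },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * where ip/ip_m and udp/udp_m are application-defined spec/mask structs.
 * ETH or VLAN as the first item is rejected by the valid_start_item and
 * prev_items checks.
 */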
/**
 * NICs have Advanced Filters capability but they are disabled. This means
 * that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_IPV4_5TUPLE] = {
		.item_info = enic_items_v1,
		.max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
	},
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[FILTER_DPDK_1] = {
		.item_info = enic_items_v3,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
};
/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	},
	[FILTER_ACTION_FILTER_ID_FLAG] = {
		.actions = enic_supported_actions_v2_id,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_DROP_FLAG] = {
		.actions = enic_supported_actions_v2_drop,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_COUNTER_FLAG] = {
		.actions = enic_supported_actions_v2_count,
		.copy_fn = enic_copy_action_v2,
	},
};
static int
mask_exact_match(const u8 *supported, const u8 *supplied,
		 unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		if (supported[i] != supplied[i])
			return 0;
	}
	return 1;
}
static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct ipv4_hdr supported_mask = {
		.src_addr = 0xffffffff,
		.dst_addr = 0xffffffff,
	};

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	/* This is an exact match filter, both fields must be set */
	if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
		FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "IPv4 exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_addr = spec->hdr.src_addr;
	enic_5tup->dst_addr = spec->hdr.dst_addr;

	return 0;
}
static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct udp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "UDP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "UDP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_UDP;

	return 0;
}
static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct tcp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	if (*inner_ofst)
		return ENOTSUP;

	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "TCP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "TCP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_TCP;

	return 0;
}
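/*
 * Example (illustrative): the v1 5-tuple filters accept only full masks,
 * e.g. for TCP both ports must be fully masked. Values are hypothetical:
 *
 *	struct rte_flow_item_tcp spec = {
 *		.hdr = { .src_port = RTE_BE16(1000),
 *			 .dst_port = RTE_BE16(2000) },
 *	};
 *	struct rte_flow_item_tcp mask = {
 *		.hdr = { .src_port = 0xffff, .dst_port = 0xffff },
 *	};
 *
 * A partial mask such as 0xff00 fails the mask_exact_match() check above.
 */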
static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_eth_mask;

	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
	       ETHER_ADDR_LEN);

	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	       ETHER_ADDR_LEN);
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;

	if (*inner_ofst == 0) {
		/* Outer header: use the L2 layer */
		memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
		       sizeof(struct ether_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
		       sizeof(struct ether_hdr));
	} else {
		if ((*inner_ofst + sizeof(struct ether_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		/* Offset into L5 where inner Ethernet header goes */
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       &enic_mask, sizeof(struct ether_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       &enic_spec, sizeof(struct ether_hdr));
		*inner_ofst += sizeof(struct ether_hdr);
	}

	return 0;
}
static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;

	if (*inner_ofst == 0) {
		struct ether_hdr *eth_mask =
			(void *)gp->layer[FILTER_GENERIC_1_L2].mask;
		struct ether_hdr *eth_val =
			(void *)gp->layer[FILTER_GENERIC_1_L2].val;

		/* Outer TPID cannot be matched */
		if (eth_mask->ether_type)
			return ENOTSUP;
		/*
		 * When matching packets, the VIC always compares the
		 * VLAN-stripped L2 header, regardless of VLAN stripping
		 * settings. So the inner type from the VLAN item becomes
		 * the ether type of the ETH header.
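		 *
		 * For example (illustrative): ETH with type 0 plus VLAN with
		 * inner_type 0x0800 and tci 1 produces an L2 key that matches
		 * ether type 0x0800 and VLAN TCI 1.
		 */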
		eth_mask->ether_type = mask->inner_type;
		eth_val->ether_type = spec->inner_type;
		/* For TCI, use the vlan mask/val fields (little endian). */
		gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
		gp->val_vlan = rte_be_to_cpu_16(spec->tci);
	} else {
		/* Inner header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct vlan_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct vlan_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct vlan_hdr));
		*inner_ofst += sizeof(struct vlan_hdr);
	}

	return 0;
}
static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	if (*inner_ofst == 0) {
		/* Match IPv4 */
		gp->mask_flags |= FILTER_GENERIC_1_IPV4;
		gp->val_flags |= FILTER_GENERIC_1_IPV4;

		/* Match all if no spec */
		if (!spec)
			return 0;

		if (!mask)
			mask = &rte_flow_item_ipv4_mask;

		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv4_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv4_hdr));
	} else {
		/* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv4_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv4_hdr));
		*inner_ofst += sizeof(struct ipv4_hdr);
	}

	return 0;
}
static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match IPv6 */
	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
		       sizeof(struct ipv6_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
		       sizeof(struct ipv6_hdr));
	} else {
		/* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct ipv6_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct ipv6_hdr));
		*inner_ofst += sizeof(struct ipv6_hdr);
	}

	return 0;
}
static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match UDP */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct udp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct udp_hdr));
	} else {
		/* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct udp_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct udp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct udp_hdr));
		*inner_ofst += sizeof(struct udp_hdr);
	}

	return 0;
}
static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Match TCP */
	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	if (*inner_ofst == 0) {
		memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
		       sizeof(struct tcp_hdr));
		memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
		       sizeof(struct tcp_hdr));
	} else {
		/* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
		if ((*inner_ofst + sizeof(struct tcp_hdr)) >
		    FILTER_GENERIC_1_KEY_LEN)
			return ENOTSUP;
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
		       mask, sizeof(struct tcp_hdr));
		memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
		       spec, sizeof(struct tcp_hdr));
		*inner_ofst += sizeof(struct tcp_hdr);
	}

	return 0;
}
static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;

	if (*inner_ofst)
		return ENOTSUP;

	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly set
	 * the protocol number in the IP pattern.
	 */
	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
		struct ipv4_hdr *ip;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->next_proto_id;
	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
		struct ipv6_hdr *ip;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->proto;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->proto;
	} else {
		/* Need IPv4/IPv6 pattern first */
		return EINVAL;
	}
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
	return 0;
}
static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	struct udp_hdr *udp;

	/*
	 * The NIC filter API has no flags for "match vxlan". Set UDP port to
	 * avoid false positives.
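	 *
	 * For example (illustrative), with this in place a pattern of just
	 * VXLAN / END matches only UDP packets to port 4789, and the VNI can
	 * be matched by supplying a spec/mask for the VXLAN header below.
	 */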
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
	udp->dst_port = 0xffff;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
	udp->dst_port = RTE_BE16(4789);
	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));

	*inner_ofst = sizeof(struct vxlan_hdr);
	return 0;
}
/*
 * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
 * very limited. It is intended for matching UDP tunnel header (e.g. vxlan).
 */
static int
enic_copy_item_raw_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_raw *spec = item->spec;
	const struct rte_flow_item_raw *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	/* Cannot be used for inner packet */
	if (*inner_ofst)
		return EINVAL;
	/* Need both spec and mask */
	if (!spec || !mask)
		return EINVAL;
	/* Only supports relative with offset 0 */
	if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
		return EINVAL;
	/* Need non-null pattern that fits within the NIC's filter pattern */
	if (spec->length == 0 || spec->length > FILTER_GENERIC_1_KEY_LEN ||
	    !spec->pattern || !mask->pattern)
		return EINVAL;
	/*
	 * Mask fields, including length, are often set to zero. Assume that
	 * means "same as spec" to avoid breaking existing apps. If length
	 * is not zero, then it should be >= spec length.
	 *
	 * No more pattern follows this, so append to the L4 layer instead of
	 * L5 to work with both recent and older VICs.
	 */
	if (mask->length != 0 && mask->length < spec->length)
		return EINVAL;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       mask->pattern, spec->length);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       spec->pattern, spec->length);

	return 0;
}
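/*
 * Example (illustrative sketch): a RAW item that matches 4 bytes
 * immediately following the outer UDP header; the byte values are
 * hypothetical.
 *
 *	static const uint8_t tunnel_hdr[4] = { 0x08, 0x00, 0x00, 0x01 };
 *	static const uint8_t tunnel_msk[4] = { 0xff, 0xff, 0xff, 0xff };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 1,
 *		.length = sizeof(tunnel_hdr),
 *		.pattern = tunnel_hdr,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1,
 *		.length = sizeof(tunnel_msk),
 *		.pattern = tunnel_msk,
 *	};
 *
 * used in a pattern as ETH / IPV4 / UDP / RAW / END, where the mask's
 * pattern bytes select which bits must match.
 */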
/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first[in]
 *   True if this is the first item in the pattern.
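 *
 * For example, under enic_items_v2, UDP lists IPV4 and IPV6 as valid
 * previous items and is not a valid start item, so IPV4 / UDP is
 * accepted while UDP alone, or ETH / UDP, is rejected.
 */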
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
		    const struct enic_items *item_info, u8 is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}

	/* This is the first item in the stack. Check if it may start a pattern */
	if (is_first_item && item_info->valid_start_item)
		return 1;
	return 0;
}
/*
 * Fix up the L5 layer. HW VXLAN parsing removes the VXLAN header from L5.
 * Instead it is in L4, following the UDP header. Append the VXLAN
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
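 *
 * For example (illustrative), for an outer UDP / VXLAN / inner ETH
 * pattern the key is rewritten from
 *	L4 = outer UDP, L5 = VXLAN + inner ETH
 * to
 *	L4 = outer UDP + VXLAN, L5 = inner ETH
 */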
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
	       uint8_t inner_ofst)
{
	uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
	uint8_t inner;
	uint8_t vxlan;

	if (!(inner_ofst > 0 && enic->vxlan))
		return;

	vxlan = sizeof(struct vxlan_hdr);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
	inner = inner_ofst - vxlan;
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}
/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filter derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_filter_cap *cap,
		 struct enic *enic,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;
	struct copy_item_args args;
	u8 is_first_item = 1;

	prev_item = 0;

	args.filter = enic_filter;
	args.inner_ofst = &inner_ofst;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If NULL
		 * is returned, the NIC does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &cap->item_info[item->type];
		if (item->type > cap->max_item_type ||
		    item_info->copy_item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   NULL, "Unsupported item.");
			return -rte_errno;
		}

		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))
			goto stacking_error;

		args.item = item;
		ret = item_info->copy_item(&args);
		if (ret)
			goto item_not_supported;
		prev_item = item->type;
		is_first_item = 0;
	}
	fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

	return 0;

item_not_supported:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");
	return -rte_errno;

stacking_error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
	return -rte_errno;
}
/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, };
	uint32_t overlap = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			break;
		}
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_RQ_STEERING;
	return 0;
}
/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the actions.
 * @param error[out]
 */
static int
enic_copy_action_v2(struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, MARK = 2, };
	uint32_t overlap = 0;
	bool passthru = false;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/*
			 * Map mark ID (32-bit) to filter ID (16-bit):
			 * - Reject values > 16 bits
			 * - Filter ID 0 is reserved for filters that steer
			 *   but not mark. So add 1 to the mark ID to avoid
			 *   it.
			 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
			 *   reserved for the "flag" action below.
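			 *
			 * For example (illustrative): mark->id 0 becomes
			 * filter_id 1, mark->id 5 becomes filter_id 6, and
			 * mark->id values of 0xfffe
			 * (ENIC_MAGIC_FILTER_ID - 1) or more are rejected.
			 */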
			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
				return EINVAL;
			enic_action->filter_id = mark->id + 1;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_DROP: {
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
				actions->conf;
			bool allow;
			uint16_t i;

			/*
			 * Hardware does not support general RSS actions, but
			 * we can still support the dummy one that is used to
			 * "receive normally".
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->level == 0 &&
				(rss->types == 0 ||
				 rss->types == enic->rss_hf) &&
				rss->queue_num == enic->rq_count &&
				rss->key_len == 0;
			/* Identity queue map is ok */
			for (i = 0; i < rss->queue_num; i++)
				allow = allow && (i == rss->queue[i]);
			if (!allow)
				return ENOTSUP;

			/* Need MARK or FLAG */
			if (!(overlap & MARK))
				return ENOTSUP;
			overlap |= FATE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			/*
			 * Like RSS above, PASSTHRU + MARK may be used to
			 * "mark and then receive normally". MARK usually comes
			 * after PASSTHRU, so remember we have seen passthru
			 * and check for mark later.
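			 *
			 * For example (illustrative), the action list
			 * PASSTHRU / MARK(id = 1) / END marks matching
			 * packets with 1 and lets them be received normally.
			 */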
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			passthru = true;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	/* Only PASSTHRU + MARK is allowed */
	if (passthru && !(overlap & MARK))
		return ENOTSUP;
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_V2;
	return 0;
}
/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}
/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
	if (enic->flow_filter_mode)
		return &enic_filter_cap[enic->flow_filter_mode];

	return NULL;
}
/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
	const struct enic_action_cap *ea;
	uint64_t actions;

	actions = enic->filter_actions;
	if (actions & FILTER_ACTION_COUNTER_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
	else if (actions & FILTER_ACTION_DROP_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
	else
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
	return ea;
}
/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
	if (ea->type == FILTER_ACTION_RQ_STEERING) {
		FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
	} else if (ea->type == FILTER_ACTION_V2) {
		FLOW_LOG(INFO, "Actions(V2)\n");
		if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
			FLOW_LOG(INFO, "\tqueue: %u\n",
				 enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
		if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
			FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
	}
}
/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
	const struct filter_generic_1 *gp;
	int i, j, mbyte;
	char buf[128], *bp;
	char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
	char l4csum[16], ipfrag[16];

	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
		break;
	case FILTER_USNIC_IP:
	case FILTER_DPDK_1:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
			 gp->val_vlan, gp->mask_vlan);
		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
			sprintf(ip4, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV4)
				 ? "ip4(y)" : "ip4(n)");
		else
			sprintf(ip4, "%s ", "ip4(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
			sprintf(ip6, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV6)
				 ? "ip6(y)" : "ip6(n)");
		else
			sprintf(ip6, "%s ", "ip6(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
			sprintf(udp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_UDP)
				 ? "udp(y)" : "udp(n)");
		else
			sprintf(udp, "%s ", "udp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
			sprintf(tcp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP)
				 ? "tcp(y)" : "tcp(n)");
		else
			sprintf(tcp, "%s ", "tcp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				 ? "tcpudp(y)" : "tcpudp(n)");
		else
			sprintf(tcpudp, "%s ", "tcpudp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				 ? "ip4csum(y)" : "ip4csum(n)");
		else
			sprintf(ip4csum, "%s ", "ip4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				 ? "l4csum(y)" : "l4csum(n)");
		else
			sprintf(l4csum, "%s ", "l4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				 ? "ipfrag(y)" : "ipfrag(n)");
		else
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);
		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
				mbyte--;
			if (mbyte == 0)
				continue;

			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x ",
					gp->layer[i].mask[j]);
				bp += 3;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x ",
					gp->layer[i].val[j]);
				bp += 3;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
		}
		break;
	default:
		FLOW_LOG(INFO, "FILTER UNKNOWN\n");
		break;
	}
}
/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
	enic_dump_filter(filt);
	enic_dump_actions(ea);
}
/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	unsigned int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify Actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(enic, actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify Flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
			       enic_filter, error);
	return ret;
}
/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		     struct filter_action_v2 *enic_action,
		     struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	u16 entry;
	int ctr_idx;
	int last_max_flow_ctr;

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	flow->counter_idx = -1;
	last_max_flow_ctr = -1;
	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "cannot allocate counter");
			goto unwind_flow_alloc;
		}
		flow->counter_idx = ctr_idx;
		enic_action->counter_index = ctr_idx;

		/* If index is the largest, increase the counter DMA size */
		if (ctr_idx > enic->max_flow_counter) {
			err = vnic_dev_counter_dma_cfg(enic->vdev,
						VNIC_FLOW_COUNTER_UPDATE_MSECS,
						ctr_idx + 1);
			if (err) {
				rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					NULL, "counter DMA config failed");
				goto unwind_ctr_alloc;
			}
			last_max_flow_ctr = enic->max_flow_counter;
			enic->max_flow_counter = ctr_idx;
		}
	}

	/* entry[in] is the queue id, entry[out] is the filter ID for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		goto unwind_ctr_dma_cfg;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;

	return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
	if (last_max_flow_ctr != -1) {
		/* reduce counter DMA size */
		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
		enic->max_flow_counter = last_max_flow_ctr;
	}
unwind_ctr_alloc:
	if (flow->counter_idx != -1)
		vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
	rte_free(flow);
	return NULL;
}
/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param filter_id[in]
 *   Id of the NIC filter.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	u16 filter_id;
	int err;

	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}

	if (flow->counter_idx != -1) {
		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
			dev_err(enic, "counter free failed, idx: %d\n",
				flow->counter_idx);
		flow->counter_idx = -1;
	}
	return 0;
}
/*
 * The following functions are callbacks for Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}
/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
			      &enic_action);
	if (ret < 0)
		return NULL;

	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);

	return flow;
}
/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  __rte_unused struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
	rte_free(flow);
	return 0;
}
/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	rte_spinlock_lock(&enic->flows_lock);

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}
static int
enic_flow_query_count(struct rte_eth_dev *dev,
		      struct rte_flow *flow, void *data,
		      struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);
	struct rte_flow_query_count *query;
	uint64_t packets, bytes;

	if (flow->counter_idx == -1) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "flow does not have counter");
	}
	query = (struct rte_flow_query_count *)data;
	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
				    !!query->reset, &packets, &bytes)) {
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL,
			 "cannot read counter");
	}
	query->hits_set = 1;
	query->bytes_set = 1;
	query->hits = packets;
	query->bytes = bytes;
	return 0;
}
static int
enic_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
/**
 * Flow callback registration.
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
	.query = enic_flow_query,
};
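
/*
 * Example (illustrative sketch, application side): these callbacks are
 * reached through the generic rte_flow API rather than called directly,
 * e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow *f;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * where port_id is the application's enic port and pattern/actions are
 * arrays like those in the examples above.
 */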