/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>

#include "enic_compat.h"

#define FLOW_TRACE() \
	rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
		"%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
		fmt "\n", ##args)

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled-in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 * l2_proto_off: offset to the EtherType field in the eth or vlan header.
 * l3_proto_off: offset to the next-protocol field in the IPv4 or IPv6 header.
 */
struct copy_item_args {
	const struct rte_flow_item *item;
	struct filter_v2 *filter;
	uint8_t *inner_ofst;
	uint8_t l2_proto_off;
	uint8_t l3_proto_off;
	struct enic *enic;
};

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
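/*
 * Each copy_item function validates one pattern item against the NIC's
 * capabilities and, on success, copies its spec/mask into the filter.
 * By convention these functions return 0 on success and a positive errno
 * value (e.g. ENOTSUP) on failure; enic_copy_filter() passes that value
 * straight to rte_flow_error_set().
 */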
/** Info about how to copy items into enic filters. */
struct enic_items {
	/** Function for copying and validating an item. */
	enic_copy_item_fn *copy_item;
	/** List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const u8 valid_start_item;
	/* Inner packet version of copy_item. */
	enic_copy_item_fn *inner_copy_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** List of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
	/* Max type in the above list, used to detect unsupported types */
	enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
			     const struct rte_flow_action actions[],
			     struct filter_action_v2 *enic_action);
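/*
 * A copy_action_fn walks the END-terminated actions[] array, checks that
 * the combination of actions is one the NIC can express, and fills in the
 * filter_action_v2 structure. Like the item copy functions, it returns 0
 * on success and a positive errno value on failure.
 */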
/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** list of valid actions */
	const enum rte_flow_action_type *actions;
	/** copy function for a particular NIC */
	copy_action_fn *copy_fn;
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static enic_copy_item_fn enic_copy_item_inner_eth_v2;
static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
static enic_copy_item_fn enic_copy_item_inner_udp_v2;
static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v1,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
};
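/*
 * As the v1 table encodes, the only accepted patterns are of the form
 * "ipv4 / end", "ipv4 / udp / end", and "ipv4 / tcp / end": IPv4 is the
 * only valid start item, and UDP/TCP may only follow IPv4.
 */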
/**
 * NICs have the Advanced Filters capability but it is disabled. This means
 * that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_eth_v2,
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_vlan_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv4_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv6_v2,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_udp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_tcp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
};
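/*
 * With the v2 table, L4 items cannot start a pattern (valid_start_item
 * is 0), so "eth / ipv4 / udp / vxlan / eth / ipv4 / tcp / end" is
 * accepted but "udp / end" is not.
 */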
/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_VXLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_eth_v2,
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_vlan_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv4_v2,
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_ipv6_v2,
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_udp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = enic_copy_item_inner_tcp_v2,
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
		.inner_copy_item = NULL,
	},
};
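/*
 * Note the difference from the v2 table: with Advanced Filters enabled,
 * UDP, TCP, and VXLAN may start a pattern (valid_start_item is 1), so
 * L3-less flows such as "udp / vxlan / end" become expressible.
 */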
/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_IPV4_5TUPLE] = {
		.item_info = enic_items_v1,
		.max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
	},
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[FILTER_DPDK_1] = {
		.item_info = enic_items_v3,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	},
	[FILTER_ACTION_FILTER_ID_FLAG] = {
		.actions = enic_supported_actions_v2_id,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_DROP_FLAG] = {
		.actions = enic_supported_actions_v2_drop,
		.copy_fn = enic_copy_action_v2,
	},
	[FILTER_ACTION_COUNTER_FLAG] = {
		.actions = enic_supported_actions_v2_count,
		.copy_fn = enic_copy_action_v2,
	},
};
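/*
 * Return 1 if the supplied mask is byte-for-byte identical to the mask
 * the NIC supports, 0 otherwise. The v1 5-tuple filters are exact-match
 * only, so partial masks cannot be honored.
 */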
static int
mask_exact_match(const u8 *supported, const u8 *supplied,
		 u32 size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		if (supported[i] != supplied[i])
			return 0;
	}
	return 1;
}

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct ipv4_hdr supported_mask = {
		.src_addr = 0xffffffff,
		.dst_addr = 0xffffffff,
	};

	FLOW_TRACE();

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	/* This is an exact match filter, both fields must be set */
	if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
		FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "IPv4 exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_addr = spec->hdr.src_addr;
	enic_5tup->dst_addr = spec->hdr.dst_addr;

	return 0;
}
static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct udp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "UDP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "UDP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_UDP;

	return 0;
}
static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct tcp_hdr supported_mask = {
		.src_port = 0xffff,
		.dst_port = 0xffff,
	};

	FLOW_TRACE();

	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "TCP exact match src/dst port");
		return ENOTSUP;
	}

	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "TCP exact match mask");
		return ENOTSUP;
	}

	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_TCP;

	return 0;
}
/*
 * The common 'copy' function for all inner packet patterns. Patterns are
 * first appended to the L5 pattern buffer. Then, since the NIC filter
 * API has no special support for inner packet matching at the moment,
 * we set EtherType and IP proto as necessary.
 */
static int
copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
		  const void *val, const void *mask, uint8_t val_size,
		  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
{
	uint8_t *l5_mask, *l5_val;
	uint8_t start_off;

	/* No space left in the L5 pattern buffer. */
	start_off = *inner_ofst;
	if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
		return ENOTSUP;
	l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
	l5_val = gp->layer[FILTER_GENERIC_1_L5].val;

	/* Copy the pattern into the L5 buffer. */
	if (val) {
		memcpy(l5_mask + start_off, mask, val_size);
		memcpy(l5_val + start_off, val, val_size);
	}

	/* Set the protocol field in the previous header. */
	if (proto_off) {
		void *m, *v;

		m = l5_mask + proto_off;
		v = l5_val + proto_off;
		if (proto_size == 1) {
			*(uint8_t *)m = 0xff;
			*(uint8_t *)v = (uint8_t)proto_val;
		} else if (proto_size == 2) {
			*(uint16_t *)m = 0xffff;
			*(uint16_t *)v = proto_val;
		}
	}
	/* All inner headers land in L5 buffer even if their spec is null. */
	*inner_ofst += val_size;
	return 0;
}
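/*
 * Per-protocol wrappers around copy_inner_common(). Each one records the
 * offset of its own protocol field (for the next inner item to fill in)
 * and passes along the offset recorded by the previous item so that the
 * encapsulating header's EtherType or IP protocol gets set.
 */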
static int
enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ether_hdr),
		0 /* no previous protocol */, 0, 0);
}
static int
enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
	uint8_t eth_type_off;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	/* Append vlan header to L5 and set ether type = TPID */
	eth_type_off = arg->l2_proto_off;
	arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct vlan_hdr),
		eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
}
static int
enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	/* Append ipv4 header to L5 and set ether type = ipv4 */
	arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ipv4_hdr),
		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
}
static int
enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	/* Append ipv6 header to L5 and set ether type = ipv6 */
	arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ipv6_hdr),
		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
}
static int
enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_udp_mask;
	/* Append udp header to L5 and set ip proto = udp */
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct udp_hdr),
		arg->l3_proto_off, IPPROTO_UDP, 1);
}
static int
enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
{
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;

	FLOW_TRACE();
	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	/* Append tcp header to L5 and set ip proto = tcp */
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct tcp_hdr),
		arg->l3_proto_off, IPPROTO_TCP, 1);
}
static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_eth_mask;

	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
	       ETHER_ADDR_LEN);

	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	       ETHER_ADDR_LEN);
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	       ETHER_ADDR_LEN);
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;

	memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
	       sizeof(struct ether_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
	       sizeof(struct ether_hdr));
	return 0;
}
static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	struct ether_hdr *eth_mask;
	struct ether_hdr *eth_val;

	FLOW_TRACE();

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;

	eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
	eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
	/* Outer TPID cannot be matched */
	if (eth_mask->ether_type)
		return ENOTSUP;

	/*
	 * When matching packets, the VIC always compares the vlan-stripped
	 * L2 header, regardless of vlan stripping settings. So, the inner
	 * type from the vlan item becomes the ether type of the eth header.
	 *
	 * Older models w/o hardware vxlan parser have a different behavior
	 * when vlan stripping is disabled. In this case, the vlan tag
	 * remains in the L2 buffer.
	 */
	if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
		struct vlan_hdr *vlan;

		vlan = (struct vlan_hdr *)(eth_mask + 1);
		vlan->eth_proto = mask->inner_type;
		vlan = (struct vlan_hdr *)(eth_val + 1);
		vlan->eth_proto = spec->inner_type;
	} else {
		eth_mask->ether_type = mask->inner_type;
		eth_val->ether_type = spec->inner_type;
	}
	/* For TCI, use the vlan mask/val fields (little endian). */
	gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
	gp->val_vlan = rte_be_to_cpu_16(spec->tci);
	return 0;
}
static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	gp->mask_flags |= FILTER_GENERIC_1_IPV4;
	gp->val_flags |= FILTER_GENERIC_1_IPV4;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
	       sizeof(struct ipv4_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
	       sizeof(struct ipv4_hdr));
	return 0;
}
static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
	       sizeof(struct ipv6_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
	       sizeof(struct ipv6_hdr));
	return 0;
}
static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_udp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct udp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct udp_hdr));
	return 0;
}
static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		return ENOTSUP;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct tcp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct tcp_hdr));
	return 0;
}
static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;

	FLOW_TRACE();

	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly
	 * set the protocol number in the IP pattern.
	 */
	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
		struct ipv4_hdr *ip;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->next_proto_id;
	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
		struct ipv6_hdr *ip;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->proto;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->proto;
	} else {
		/* Need IPv4/IPv6 pattern first */
		return EINVAL;
	}
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;

	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
	return 0;
}
static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	struct udp_hdr *udp;

	FLOW_TRACE();

	/*
	 * The NIC filter API has no flags for "match vxlan". Set UDP port to
	 * avoid false positives.
	 */
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
	udp->dst_port = 0xffff;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
	udp->dst_port = RTE_BE16(4789);
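	/* 4789 is the IANA-assigned VXLAN destination port */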
	/* Match all if no spec */
	if (!spec)
		return 0;

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));

	*inner_ofst = sizeof(struct vxlan_hdr);
	return 0;
}
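/*
 * A non-zero *inner_ofst tells enic_copy_filter() that every item after
 * this one describes the inner (encapsulated) packet, so it dispatches
 * to the inner_copy_item handlers from this point on.
 */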
/**
 * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
 * very limited. It is intended for matching UDP tunnel header (e.g. vxlan).
 */
static int
enic_copy_item_raw_v2(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_raw *spec = item->spec;
	const struct rte_flow_item_raw *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;

	FLOW_TRACE();

	/* Cannot be used for inner packet */
	if (*inner_ofst)
		return EINVAL;
	/* Need both spec and mask */
	if (!spec || !mask)
		return EINVAL;
	/* Only supports relative with offset 0 */
	if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
		return EINVAL;
	/* Need non-null pattern that fits within the NIC's filter pattern */
	if (spec->length == 0 ||
	    spec->length + sizeof(struct udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
	    !spec->pattern || !mask->pattern)
		return EINVAL;
	/*
	 * Mask fields, including length, are often set to zero. Assume that
	 * means "same as spec" to avoid breaking existing apps. If length
	 * is not zero, then it should be >= spec length.
	 *
	 * No more pattern follows this, so append to the L4 layer instead of
	 * L5 to work with both recent and older VICs.
	 */
	if (mask->length != 0 && mask->length < spec->length)
		return EINVAL;
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       mask->pattern, spec->length);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       spec->pattern, spec->length);

	return 0;
}
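/*
 * For example (testpmd-style syntax, shown only as an illustration), an
 * application could match bytes that immediately follow the outer UDP
 * header with "eth / ipv4 / udp / raw relative is 1 offset is 0 pattern
 * is <bytes> / end"; anything more general is rejected by the checks above.
 */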
/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
		    const struct enic_items *item_info, u8 is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	FLOW_TRACE();

	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}

	/* This is the first item in the stack. Check if that's allowed */
	if (is_first_item && item_info->valid_start_item)
		return 1;
	return 0;
}
/*
 * Fix up the L5 layer. HW vxlan parsing removes the vxlan header from L5;
 * instead it lands in L4, following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
 */
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
	       uint8_t inner_ofst)
{
	uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
	uint8_t inner;
	uint8_t vxlan;

	if (!(inner_ofst > 0 && enic->vxlan))
		return;
	FLOW_TRACE();
	vxlan = sizeof(struct vxlan_hdr);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
	inner = inner_ofst - vxlan;
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}
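/*
 * In effect, for a VIC that parses vxlan in hardware:
 *   before: L4 = [udp],        L5 = [vxlan][inner eth][inner ip]...
 *   after:  L4 = [udp][vxlan], L5 = [inner eth][inner ip]...
 */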
/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_filter_cap *cap,
		 struct enic *enic,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;
	struct copy_item_args args;
	enic_copy_item_fn *copy_fn;
	u8 is_first_item = 1;

	FLOW_TRACE();

	prev_item = 0;

	args.filter = enic_filter;
	args.inner_ofst = &inner_ofst;
	args.enic = enic;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If NULL
		 * is returned the nic does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &cap->item_info[item->type];
		if (item->type > cap->max_item_type ||
		    item_info->copy_item == NULL ||
		    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   NULL, "Unsupported item.");
			return -rte_errno;
		}

		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))
			goto stacking_error;

		args.item = item;
		copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
			item_info->copy_item;
		ret = copy_fn(&args);
		if (ret)
			goto item_not_supported;
		prev_item = item->type;
		is_first_item = 0;
	}
	fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

	return 0;

item_not_supported:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");
	return -rte_errno;

stacking_error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
	return -rte_errno;
}
/**
 * Build the internal version 1 NIC action structure from the provided actions.
 * The action list is validated as it is copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(__rte_unused struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, };
	uint32_t overlap = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			break;
		}
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_RQ_STEERING;
	return 0;
}
/**
 * Build the internal version 2 NIC action structure from the provided actions.
 * The action list is validated as it is copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
{
	enum { FATE = 1, MARK = 2, };
	uint32_t overlap = 0;
	bool passthru = false;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/*
			 * Map mark ID (32-bit) to filter ID (16-bit):
			 * - Reject values > 16 bits
			 * - Filter ID 0 is reserved for filters that steer
			 *   but not mark. So add 1 to the mark ID to avoid
			 *   it.
			 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
			 *   reserved for the "flag" action below.
			 */
			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
				return EINVAL;
			enic_action->filter_id = mark->id + 1;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			if (overlap & MARK)
				return ENOTSUP;
			overlap |= MARK;
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_DROP: {
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
				actions->conf;
			bool allow;
			uint16_t i;

			/*
			 * Hardware does not support general RSS actions, but
			 * we can still support the dummy one that is used to
			 * "receive normally".
			 */
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->level == 0 &&
				(rss->types == 0 ||
				 rss->types == enic->rss_hf) &&
				rss->queue_num == enic->rq_count &&
				rss->key_len == 0;
			/* Identity queue map is ok */
			for (i = 0; i < rss->queue_num; i++)
				allow = allow && (i == rss->queue[i]);
			if (!allow)
				return ENOTSUP;
			if (overlap & FATE)
				return ENOTSUP;
			/* Need MARK or FLAG */
			if (!(overlap & MARK))
				return ENOTSUP;
			overlap |= FATE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			/*
			 * Like RSS above, PASSTHRU + MARK may be used to
			 * "mark and then receive normally". MARK usually comes
			 * after PASSTHRU, so remember we have seen passthru
			 * and check for mark later.
			 */
			if (overlap & FATE)
				return ENOTSUP;
			overlap |= FATE;
			passthru = true;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			RTE_ASSERT(0);
			break;
		}
	}
	/* Only PASSTHRU + MARK is allowed */
	if (passthru && !(overlap & MARK))
		return ENOTSUP;
	if (!(overlap & FATE))
		return ENOTSUP;
	enic_action->type = FILTER_ACTION_V2;
	return 0;
}
/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
	if (enic->flow_filter_mode)
		return &enic_filter_cap[enic->flow_filter_mode];

	return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
	const struct enic_action_cap *ea;
	uint8_t actions;

	actions = enic->filter_actions;
	if (actions & FILTER_ACTION_COUNTER_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
	else if (actions & FILTER_ACTION_DROP_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
	else
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
	return ea;
}
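/*
 * As the arrays above encode, the action sets are strictly nested
 * (counter implies drop, drop implies filter-id, and so on), so picking
 * the highest capability flag the firmware advertises yields the full
 * list of supported actions.
 */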
/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
	if (ea->type == FILTER_ACTION_RQ_STEERING) {
		FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
	} else if (ea->type == FILTER_ACTION_V2) {
		FLOW_LOG(INFO, "Actions(V2)\n");
		if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
			FLOW_LOG(INFO, "\tqueue: %u\n",
				 enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
		if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
			FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
	}
}
/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
	const struct filter_generic_1 *gp;
	int i, j, mbyte;
	char buf[128], *bp;
	char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
	char l4csum[16], ipfrag[16];

	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
		break;
	case FILTER_USNIC_IP:
	case FILTER_DPDK_1:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
			 gp->val_vlan, gp->mask_vlan);

		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
			sprintf(ip4, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV4)
				 ? "ip4(y)" : "ip4(n)");
		else
			sprintf(ip4, "%s ", "ip4(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
			sprintf(ip6, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPV6)
				 ? "ip6(y)" : "ip6(n)");
		else
			sprintf(ip6, "%s ", "ip6(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
			sprintf(udp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_UDP)
				 ? "udp(y)" : "udp(n)");
		else
			sprintf(udp, "%s ", "udp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
			sprintf(tcp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP)
				 ? "tcp(y)" : "tcp(n)");
		else
			sprintf(tcp, "%s ", "tcp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				 ? "tcpudp(y)" : "tcpudp(n)");
		else
			sprintf(tcpudp, "%s ", "tcpudp(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				 ? "ip4csum(y)" : "ip4csum(n)");
		else
			sprintf(ip4csum, "%s ", "ip4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				 ? "l4csum(y)" : "l4csum(n)");
		else
			sprintf(l4csum, "%s ", "l4csum(x)");

		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				 ? "ipfrag(y)" : "ipfrag(n)");
		else
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);

		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
				mbyte--;
			if (mbyte == 0)
				continue;

			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].mask[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			bp = buf;
			for (j = 0; j <= mbyte; j++) {
				sprintf(bp, "%02x",
					gp->layer[i].val[j]);
				bp += 2;
			}
			*bp = '\0';
			FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
		}
		break;
	default:
		FLOW_LOG(INFO, "FILTER UNKNOWN\n");
		break;
	}
}
/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
	enic_dump_filter(filt);
	enic_dump_actions(ea);
}
/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	unsigned int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	FLOW_TRACE();

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify Actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(enic, actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify Flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
			       enic_filter, error);
	return ret;
}
/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		     struct filter_action_v2 *enic_action,
		     struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	uint16_t entry;
	int ctr_idx;
	int last_max_flow_ctr;

	FLOW_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	flow->counter_idx = -1;
	last_max_flow_ctr = -1;
	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "cannot allocate counter");
			goto unwind_flow_alloc;
		}
		flow->counter_idx = ctr_idx;
		enic_action->counter_index = ctr_idx;

		/* If index is the largest, increase the counter DMA size */
		if (ctr_idx > enic->max_flow_counter) {
			err = vnic_dev_counter_dma_cfg(enic->vdev,
						VNIC_FLOW_COUNTER_UPDATE_MSECS,
						ctr_idx + 1);
			if (err) {
				rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					NULL, "counter DMA config failed");
				goto unwind_ctr_alloc;
			}
			last_max_flow_ctr = enic->max_flow_counter;
			enic->max_flow_counter = ctr_idx;
		}
	}

	/* entry[in] is the queue id, entry[out] is the filter Id for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		goto unwind_ctr_dma_cfg;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;

	return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
	if (last_max_flow_ctr != -1) {
		/* reduce counter DMA size */
		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
		enic->max_flow_counter = last_max_flow_ctr;
	}
unwind_ctr_alloc:
	if (flow->counter_idx != -1)
		vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
	rte_free(flow);
	return NULL;
}
/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose filter (and counter, if any) is removed.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	u16 filter_id;
	int err;

	FLOW_TRACE();

	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}

	if (flow->counter_idx != -1) {
		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
			dev_err(enic, "counter free failed, idx: %d\n",
				flow->counter_idx);
		flow->counter_idx = -1;
	}
	return 0;
}
/*
 * The following functions are callbacks for the generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}
/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (ret < 0)
		return NULL;

	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);

	return flow;
}
/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  __rte_unused struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
	rte_free(flow);
	return 0;
}
/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}
static int
enic_flow_query_count(struct rte_eth_dev *dev,
		      struct rte_flow *flow, void *data,
		      struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);
	struct rte_flow_query_count *query;
	uint64_t packets, bytes;

	FLOW_TRACE();

	if (flow->counter_idx == -1) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "flow does not have counter");
	}
	query = (struct rte_flow_query_count *)data;
	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
				    !!query->reset, &packets, &bytes)) {
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL,
			 "cannot read counter");
	}
	query->hits_set = 1;
	query->bytes_set = 1;
	query->hits = packets;
	query->bytes = bytes;
	return 0;
}
static int
enic_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
	.query = enic_flow_query,
};