/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include "enic_compat.h"
#define FLOW_TRACE() \
	rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
#define FLOW_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
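/*
 * Illustrative expansion (a sketch, not part of the driver; the tail of the
 * macro is not shown above): a call such as
 *	FLOW_LOG(ERR, "IPv4 exact match mask");
 * token-pastes the level and becomes, roughly,
 *	rte_log(RTE_LOG_ERR, enicpmd_logtype_flow, ...);
 * with the format string and arguments appended.
 */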
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 * l2_proto_off: offset of the EtherType field in the eth or vlan header.
 * l3_proto_off: offset of the next-protocol field in the IPv4 or IPv6 header.
struct copy_item_args {
	const struct rte_flow_item *item;
	struct filter_v2 *filter;
/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
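/*
 * Usage sketch (illustrative only): enic_copy_filter() below fills in a
 * copy_item_args on the stack and hands it to the handler selected from
 * the item table, e.g.
 *
 *	struct copy_item_args args;
 *
 *	args.item = item;
 *	args.filter = enic_filter;
 *	args.inner_ofst = &inner_ofst;
 *	ret = item_info->copy_item(&args);
 */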
/** Info about how to copy items into enic filters. */
	/** Function for copying and validating an item. */
	enic_copy_item_fn *copy_item;
	/** List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/** True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	const u8 valid_start_item;
	/* Inner packet version of copy_item. */
	enic_copy_item_fn *inner_copy_item;
/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
	/** list of valid items and their handlers and attributes. */
	const struct enic_items *item_info;
	/* Max type in the above list, used to detect unsupported types */
	enum rte_flow_item_type max_item_type;
/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(struct enic *enic,
			     const struct rte_flow_action actions[],
			     struct filter_action_v2 *enic_action);
/** Action capabilities for various NICs. */
struct enic_action_cap {
	/** list of valid actions */
	const enum rte_flow_action_type *actions;
	/** copy function for a particular NIC */
	copy_action_fn *copy_fn;
/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static enic_copy_item_fn enic_copy_item_inner_eth_v2;
static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
static enic_copy_item_fn enic_copy_item_inner_udp_v2;
static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
static const struct enic_items enic_items_v1[] = {
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v1,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v1,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
 * NICs have Advanced Filters capability but they are disabled. This means
 * that layer 3 must be specified.
static const struct enic_items enic_items_v2[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_eth_v2,
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_vlan_v2,
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_ipv4_v2,
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_ipv6_v2,
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_udp_v2,
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_tcp_v2,
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_copy_item_raw_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_copy_item_eth_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_VXLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_eth_v2,
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_copy_item_vlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_vlan_v2,
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_copy_item_ipv4_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_ipv4_v2,
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_copy_item_ipv6_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_ipv6_v2,
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_copy_item_udp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_udp_v2,
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_copy_item_tcp_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = enic_copy_item_inner_tcp_v2,
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_copy_item_sctp_v2,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_copy_item_vxlan_v2,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			       RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_END,
		.inner_copy_item = NULL,
/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
	[FILTER_IPV4_5TUPLE] = {
		.item_info = enic_items_v1,
		.max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
	[FILTER_USNIC_IP] = {
		.item_info = enic_items_v2,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.item_info = enic_items_v3,
		.max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_END,
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
	[FILTER_ACTION_RQ_STEERING_FLAG] = {
		.actions = enic_supported_actions_v1,
		.copy_fn = enic_copy_action_v1,
	[FILTER_ACTION_FILTER_ID_FLAG] = {
		.actions = enic_supported_actions_v2_id,
		.copy_fn = enic_copy_action_v2,
	[FILTER_ACTION_DROP_FLAG] = {
		.actions = enic_supported_actions_v2_drop,
		.copy_fn = enic_copy_action_v2,
	[FILTER_ACTION_COUNTER_FLAG] = {
		.actions = enic_supported_actions_v2_count,
		.copy_fn = enic_copy_action_v2,
mask_exact_match(const u8 *supported, const u8 *supplied,
	for (i = 0; i < size; i++) {
		if (supported[i] != supplied[i])
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct ipv4_hdr supported_mask = {
		.src_addr = 0xffffffff,
		.dst_addr = 0xffffffff,
		mask = &rte_flow_item_ipv4_mask;
	/* This is an exact match filter, both fields must be set */
	if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
		FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "IPv4 exact match mask");
	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_addr = spec->hdr.src_addr;
	enic_5tup->dst_addr = spec->hdr.dst_addr;
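/*
 * Worked example (hypothetical addresses): the v1 filter is an exact
 * 5-tuple match, so only a fully-masked spec like the following passes;
 * a partial mask (e.g. src_addr mask 0xffffff00) fails mask_exact_match().
 *
 *	struct rte_flow_item_ipv4 spec = {
 *		.hdr = {
 *			.src_addr = RTE_BE32(0x0a000001),	(10.0.0.1)
 *			.dst_addr = RTE_BE32(0x0a000002),	(10.0.0.2)
 *		},
 *	};
 *	struct rte_flow_item_ipv4 mask = {
 *		.hdr = {
 *			.src_addr = RTE_BE32(0xffffffff),
 *			.dst_addr = RTE_BE32(0xffffffff),
 *		},
 *	};
 */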
enic_copy_item_udp_v1(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct udp_hdr supported_mask = {
		mask = &rte_flow_item_udp_mask;
	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "UDP exact match src/dst port");
	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "UDP exact match mask");
	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_UDP;
enic_copy_item_tcp_v1(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
	struct tcp_hdr supported_mask = {
		mask = &rte_flow_item_tcp_mask;
	/* This is an exact match filter, both ports must be set */
	if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
		FLOW_LOG(ERR, "TCP exact match src/dst port");
	/* check that the supplied mask exactly matches capability */
	if (!mask_exact_match((const u8 *)&supported_mask,
			      (const u8 *)item->mask, sizeof(*mask))) {
		FLOW_LOG(ERR, "TCP exact match mask");
	enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	enic_5tup->src_port = spec->hdr.src_port;
	enic_5tup->dst_port = spec->hdr.dst_port;
	enic_5tup->protocol = PROTO_TCP;
 * The common 'copy' function for all inner packet patterns. Patterns are
 * first appended to the L5 pattern buffer. Then, since the NIC filter
 * API has no special support for inner packet matching at the moment,
 * we set EtherType and IP proto as necessary.
copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
		  const void *val, const void *mask, uint8_t val_size,
		  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
	uint8_t *l5_mask, *l5_val;
	/* No space left in the L5 pattern buffer. */
	start_off = *inner_ofst;
	if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
	l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
	l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
	/* Copy the pattern into the L5 buffer. */
		memcpy(l5_mask + start_off, mask, val_size);
		memcpy(l5_val + start_off, val, val_size);
	/* Set the protocol field in the previous header. */
		m = l5_mask + proto_off;
		v = l5_val + proto_off;
		if (proto_size == 1) {
			*(uint8_t *)m = 0xff;
			*(uint8_t *)v = (uint8_t)proto_val;
		} else if (proto_size == 2) {
			*(uint16_t *)m = 0xffff;
			*(uint16_t *)v = proto_val;
	/* All inner headers land in L5 buffer even if their spec is null. */
	*inner_ofst += val_size;
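/*
 * Layout sketch (illustrative): for an inner pattern "eth / ipv4 / udp"
 * following a vxlan item, successive copy_inner_common() calls append to
 * the L5 key at increasing *inner_ofst:
 *
 *	L5 offset 8 (after vxlan_hdr):	inner ether_hdr (14 bytes)
 *	L5 offset 22:			inner ipv4_hdr (20 bytes)
 *	L5 offset 42:			inner udp_hdr (8 bytes)
 *
 * Each call also patches the EtherType/IP-proto field of the previous
 * header via proto_off, so an inner udp item cannot match a TCP packet.
 */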
enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
		mask = &rte_flow_item_eth_mask;
	arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ether_hdr),
		0 /* no previous protocol */, 0, 0);
enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
	uint8_t eth_type_off;
		mask = &rte_flow_item_vlan_mask;
	/* Append vlan header to L5 and set ether type = TPID */
	eth_type_off = arg->l2_proto_off;
	arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct vlan_hdr),
		eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
		mask = &rte_flow_item_ipv4_mask;
	/* Append ipv4 header to L5 and set ether type = ipv4 */
	arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ipv4_hdr),
		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
		mask = &rte_flow_item_ipv6_mask;
	/* Append ipv6 header to L5 and set ether type = ipv6 */
	arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct ipv6_hdr),
		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
		mask = &rte_flow_item_udp_mask;
	/* Append udp header to L5 and set ip proto = udp */
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct udp_hdr),
		arg->l3_proto_off, IPPROTO_UDP, 1);
enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
	const void *mask = arg->item->mask;
	uint8_t *off = arg->inner_ofst;
		mask = &rte_flow_item_tcp_mask;
	/* Append tcp header to L5 and set ip proto = tcp */
	return copy_inner_common(&arg->filter->u.generic_1, off,
		arg->item->spec, mask, sizeof(struct tcp_hdr),
		arg->l3_proto_off, IPPROTO_TCP, 1);
enic_copy_item_eth_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	struct ether_hdr enic_spec;
	struct ether_hdr enic_mask;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	/* Match all if no spec */
		mask = &rte_flow_item_eth_mask;
	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
	enic_spec.ether_type = spec->type;
	enic_mask.ether_type = mask->type;
	memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
	       sizeof(struct ether_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
	       sizeof(struct ether_hdr));
enic_copy_item_vlan_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	struct ether_hdr *eth_mask;
	struct ether_hdr *eth_val;
	/* Match all if no spec */
		mask = &rte_flow_item_vlan_mask;
	eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
	eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
	/* Outer TPID cannot be matched */
	if (eth_mask->ether_type)
	 * When matching packets, the VIC always compares the vlan-stripped
	 * L2 header, regardless of vlan stripping settings. So the inner
	 * type from the vlan item becomes the EtherType of the eth header.
	eth_mask->ether_type = mask->inner_type;
	eth_val->ether_type = spec->inner_type;
	/* For TCI, use the vlan mask/val fields (little endian). */
	gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
	gp->val_vlan = rte_be_to_cpu_16(spec->tci);
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	gp->mask_flags |= FILTER_GENERIC_1_IPV4;
	gp->val_flags |= FILTER_GENERIC_1_IPV4;
	/* Match all if no spec */
		mask = &rte_flow_item_ipv4_mask;
	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
	       sizeof(struct ipv4_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
	       sizeof(struct ipv4_hdr));
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	gp->mask_flags |= FILTER_GENERIC_1_IPV6;
	gp->val_flags |= FILTER_GENERIC_1_IPV6;
	/* Match all if no spec */
		mask = &rte_flow_item_ipv6_mask;
	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
	       sizeof(struct ipv6_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
	       sizeof(struct ipv6_hdr));
enic_copy_item_udp_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;
	/* Match all if no spec */
		mask = &rte_flow_item_udp_mask;
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct udp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct udp_hdr));
enic_copy_item_tcp_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	gp->mask_flags |= FILTER_GENERIC_1_TCP;
	gp->val_flags |= FILTER_GENERIC_1_TCP;
	/* Match all if no spec */
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct tcp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct tcp_hdr));
enic_copy_item_sctp_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;
	 * The NIC filter API has no flags for "match sctp", so explicitly set
	 * the protocol number in the IP pattern.
	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->next_proto_id;
	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
		ip_proto_mask = &ip->proto;
		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
		ip_proto = &ip->proto;
		/* Need IPv4/IPv6 pattern first */
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;
	/* Match all if no spec */
		mask = &rte_flow_item_sctp_mask;
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
	       sizeof(struct sctp_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
	       sizeof(struct sctp_hdr));
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	 * The NIC filter API has no flags for "match vxlan". Set UDP port to
	 * avoid false positives.
	gp->mask_flags |= FILTER_GENERIC_1_UDP;
	gp->val_flags |= FILTER_GENERIC_1_UDP;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
	udp->dst_port = 0xffff;
	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
	udp->dst_port = RTE_BE16(4789);
	/* Match all if no spec */
		mask = &rte_flow_item_vxlan_mask;
	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
	       sizeof(struct vxlan_hdr));
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
	       sizeof(struct vxlan_hdr));
	*inner_ofst = sizeof(struct vxlan_hdr);
 * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
 * very limited. It is intended for matching UDP tunnel header (e.g. vxlan
enic_copy_item_raw_v2(struct copy_item_args *arg)
	const struct rte_flow_item *item = arg->item;
	struct filter_v2 *enic_filter = arg->filter;
	uint8_t *inner_ofst = arg->inner_ofst;
	const struct rte_flow_item_raw *spec = item->spec;
	const struct rte_flow_item_raw *mask = item->mask;
	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
	/* Cannot be used for inner packet */
	/* Need both spec and mask */
	/* Only supports relative with offset 0 */
	if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
	/* Need non-null pattern that fits within the NIC's filter pattern */
	if (spec->length == 0 ||
	    spec->length + sizeof(struct udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
	    !spec->pattern || !mask->pattern)
	 * Mask fields, including length, are often set to zero. Assume that
	 * means "same as spec" to avoid breaking existing apps. If length
	 * is not zero, then it should be >= spec length.
	 * No more pattern follows this, so append to the L4 layer instead of
	 * L5 to work with both recent and older VICs.
	if (mask->length != 0 && mask->length < spec->length)
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       mask->pattern, spec->length);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       spec->pattern, spec->length);
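/*
 * Usage sketch (hypothetical values): match 4 bytes of a custom tunnel
 * header immediately following the outer UDP header. Only relative
 * matching at offset 0 is accepted, per the checks above.
 *
 *	static const uint8_t tun_hdr[4]  = { 0x01, 0x00, 0x00, 0x00 };
 *	static const uint8_t tun_mask[4] = { 0xff, 0x00, 0x00, 0x00 };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 1,
 *		.length = 4,
 *		.pattern = tun_hdr,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1,
 *		.length = 4,
 *		.pattern = tun_mask,
 *	};
 *
 * The raw item must directly follow a UDP item (see enic_items_v2/v3).
 */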
 * Return 1 if current item is valid on top of the previous one.
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first[in]
 *   True if this is the first item in the pattern.
item_stacking_valid(enum rte_flow_item_type prev_item,
		    const struct enic_items *item_info, u8 is_first_item)
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;
	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
	/* This is the first item in the stack. Check if that's cool */
	if (is_first_item && item_info->valid_start_item)
 * Fix up the L5 layer. HW vxlan parsing removes the vxlan header from L5.
 * Instead it is in L4 following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
	uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
	if (!(inner_ofst > 0 && enic->vxlan))
	vxlan = sizeof(struct vxlan_hdr);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
	       gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
	inner = inner_ofst - vxlan;
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
	memset(layer, 0, sizeof(layer));
	memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
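/*
 * Before/after sketch (illustrative) of the shift performed above for a
 * vxlan flow on a VIC that parses vxlan in hardware:
 *
 *	before:	L4 = [udp_hdr]             L5 = [vxlan_hdr][inner pattern]
 *	after:	L4 = [udp_hdr][vxlan_hdr]  L5 = [inner pattern]
 */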
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC-specific filter derived from the pattern.
enic_copy_filter(const struct rte_flow_item pattern[],
		 const struct enic_filter_cap *cap,
		 struct filter_v2 *enic_filter,
		 struct rte_flow_error *error)
	const struct rte_flow_item *item = pattern;
	u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
	enum rte_flow_item_type prev_item;
	const struct enic_items *item_info;
	struct copy_item_args args;
	enic_copy_item_fn *copy_fn;
	u8 is_first_item = 1;
	args.filter = enic_filter;
	args.inner_ofst = &inner_ofst;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Get info about how to validate and copy the item. If NULL
		 * is returned the nic does not support the item.
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item_info = &cap->item_info[item->type];
		if (item->type > cap->max_item_type ||
		    item_info->copy_item == NULL ||
		    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
			rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Unsupported item.");
		/* check to see if item stacking is valid */
		if (!item_stacking_valid(prev_item, item_info, is_first_item))
			goto stacking_error;
		copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
			item_info->copy_item;
		ret = copy_fn(&args);
			goto item_not_supported;
		prev_item = item->type;
	fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
			   NULL, "enic type error");
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "stacking error");
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the flow actions.
enic_copy_action_v1(__rte_unused struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
	uint32_t overlap = 0;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
	if (!(overlap & FATE))
	enic_action->type = FILTER_ACTION_RQ_STEERING;
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 * @param actions[in]
 * @param enic_action[out]
 *   NIC-specific actions derived from the flow actions.
enic_copy_action_v2(struct enic *enic,
		    const struct rte_flow_action actions[],
		    struct filter_action_v2 *enic_action)
	enum { FATE = 1, MARK = 2, };
	uint32_t overlap = 0;
	bool passthru = false;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
			enic_action->rq_idx =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
			 * Map mark ID (32-bit) to filter ID (16-bit):
			 * - Reject values > 16 bits
			 * - Filter ID 0 is reserved for filters that steer
			 *   but not mark. So add 1 to the mark ID to avoid
			 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
			 *   reserved for the "flag" action below.
			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
			enic_action->filter_id = mark->id + 1;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
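			/*
			 * Worked example (hypothetical): MARK with id 5 is
			 * stored as filter_id 6. Ids 0xfffe and above are
			 * rejected by the check above, since 0xffff is the
			 * reserved ENIC_MAGIC_FILTER_ID.
			 */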
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
			enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
		case RTE_FLOW_ACTION_TYPE_DROP: {
			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
			 * Hardware does not support general RSS actions, but
			 * we can still support the dummy one that is used to
			 * "receive normally".
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				 rss->types == enic->rss_hf) &&
				rss->queue_num == enic->rq_count &&
			/* Identity queue map is ok */
			for (i = 0; i < rss->queue_num; i++)
				allow = allow && (i == rss->queue[i]);
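			/*
			 * Illustrative: on a hypothetical 4-queue port, the
			 * only RSS conf accepted here is the default one:
			 * func RTE_ETH_HASH_FUNCTION_DEFAULT, types equal to
			 * the current rss_hf (or 0), queue_num 4, and the
			 * identity map queue = { 0, 1, 2, 3 }.
			 */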
			/* Need MARK or FLAG */
			if (!(overlap & MARK))
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			 * Like RSS above, PASSTHRU + MARK may be used to
			 * "mark and then receive normally". MARK usually comes
			 * after PASSTHRU, so remember we have seen passthru
			 * and check for mark later.
		case RTE_FLOW_ACTION_TYPE_VOID:
	/* Only PASSTHRU + MARK is allowed */
	if (passthru && !(overlap & MARK))
	if (!(overlap & FATE))
	enic_action->type = FILTER_ACTION_V2;
/** Check if the action is supported */
enic_match_action(const struct rte_flow_action *action,
		  const enum rte_flow_action_type *supported_actions)
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
	if (enic->flow_filter_mode)
		return &enic_filter_cap[enic->flow_filter_mode];
/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
	const struct enic_action_cap *ea;
	actions = enic->filter_actions;
	if (actions & FILTER_ACTION_COUNTER_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
	else if (actions & FILTER_ACTION_DROP_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
		ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
/* Debug function to dump internal NIC action structure. */
enic_dump_actions(const struct filter_action_v2 *ea)
	if (ea->type == FILTER_ACTION_RQ_STEERING) {
		FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
	} else if (ea->type == FILTER_ACTION_V2) {
		FLOW_LOG(INFO, "Actions(V2)\n");
		if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
			FLOW_LOG(INFO, "\tqueue: %u\n",
				enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
		if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
			FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
/* Debug function to dump internal NIC filter structure. */
enic_dump_filter(const struct filter_v2 *filt)
	const struct filter_generic_1 *gp;
	char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
	char l4csum[16], ipfrag[16];
	switch (filt->type) {
	case FILTER_IPV4_5TUPLE:
		FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
	case FILTER_USNIC_IP:
		/* FIXME: this should be a loop */
		gp = &filt->u.generic_1;
		FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
			 gp->val_vlan, gp->mask_vlan);
		if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
				(gp->val_flags & FILTER_GENERIC_1_IPV4)
				 ? "ip4(y)" : "ip4(n)");
			sprintf(ip4, "%s ", "ip4(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
				(gp->val_flags & FILTER_GENERIC_1_IPV6)
				 ? "ip6(y)" : "ip6(n)");
			sprintf(ip6, "%s ", "ip6(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_UDP)
				(gp->val_flags & FILTER_GENERIC_1_UDP)
				 ? "udp(y)" : "udp(n)");
			sprintf(udp, "%s ", "udp(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_TCP)
				(gp->val_flags & FILTER_GENERIC_1_TCP)
				 ? "tcp(y)" : "tcp(n)");
			sprintf(tcp, "%s ", "tcp(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
			sprintf(tcpudp, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
				 ? "tcpudp(y)" : "tcpudp(n)");
			sprintf(tcpudp, "%s ", "tcpudp(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
			sprintf(ip4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
				 ? "ip4csum(y)" : "ip4csum(n)");
			sprintf(ip4csum, "%s ", "ip4csum(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
			sprintf(l4csum, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
				 ? "l4csum(y)" : "l4csum(n)");
			sprintf(l4csum, "%s ", "l4csum(x)");
		if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
			sprintf(ipfrag, "%s ",
				(gp->val_flags & FILTER_GENERIC_1_IPFRAG)
				 ? "ipfrag(y)" : "ipfrag(n)");
			sprintf(ipfrag, "%s ", "ipfrag(x)");
		FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
			 tcp, tcpudp, ip4csum, l4csum, ipfrag);
		for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
			mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
			while (mbyte && !gp->layer[i].mask[mbyte])
			for (j = 0; j <= mbyte; j++) {
					gp->layer[i].mask[j]);
			FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
			for (j = 0; j <= mbyte; j++) {
					gp->layer[i].val[j]);
			FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
		FLOW_LOG(INFO, "FILTER UNKNOWN\n");
/* Debug function to dump internal NIC flow structures. */
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
	enic_dump_filter(filt);
	enic_dump_actions(ea);
 * Internal flow parse/validate function.
 *   This device pointer.
 * @param pattern[in]
 * @param actions[in]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
	unsigned int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;
	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   "priority groups are not supported");
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   "priorities are not supported");
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   "egress is not supported");
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   "transfer is not supported");
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   "only ingress is supported");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
	/* Verify Actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
		else if (!enic_match_action(action, enic_action_cap->actions))
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
	ret = enic_action_cap->copy_fn(enic, actions, enic_action);
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unsupported action.");
	/* Verify Flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Flow API not available");
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
			       enic_filter, error);
 * Push filter/action to the NIC.
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
			struct filter_action_v2 *enic_action,
			struct rte_flow_error *error)
	struct rte_flow *flow;
	int last_max_flow_ctr;
	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
	flow->counter_idx = -1;
	last_max_flow_ctr = -1;
	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "cannot allocate counter");
			goto unwind_flow_alloc;
		flow->counter_idx = ctr_idx;
		enic_action->counter_index = ctr_idx;
		/* If index is the largest, increase the counter DMA size */
		if (ctr_idx > enic->max_flow_counter) {
			err = vnic_dev_counter_dma_cfg(enic->vdev,
					VNIC_FLOW_COUNTER_UPDATE_MSECS,
				rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					NULL, "counter DMA config failed");
				goto unwind_ctr_alloc;
			last_max_flow_ctr = enic->max_flow_counter;
			enic->max_flow_counter = ctr_idx;
	/* entry[in] is the queue id, entry[out] is the filter Id for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		goto unwind_ctr_dma_cfg;
	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;
	/* unwind if there are errors */
	if (last_max_flow_ctr != -1) {
		/* reduce counter DMA size */
		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
		enic->max_flow_counter = last_max_flow_ctr;
	if (flow->counter_idx != -1)
		vnic_dev_counter_free(enic->vdev, ctr_idx);
 * Remove filter/action from the NIC.
 *   Device structure pointer.
 * @param filter_id[in]
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
			struct rte_flow_error *error)
	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
	if (flow->counter_idx != -1) {
		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
			dev_err(enic, "counter free failed, idx: %d\n",
		flow->counter_idx = -1;
 * The following functions are callbacks for Generic flow API.
 * Validate a flow supported by the NIC.
 * @see rte_flow_validate()
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
		enic_dump_flow(&enic_action, &enic_filter);
 * Create a flow supported by the NIC.
 * @see rte_flow_create()
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);
	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
 * Destroy a flow supported by the NIC.
 * @see rte_flow_destroy()
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  __rte_unused struct rte_flow_error *error)
	struct enic *enic = pmd_priv(dev);
	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
 * Flush all flows on the device.
 * @see rte_flow_flush()
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);
	rte_spinlock_lock(&enic->flows_lock);
	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
enic_flow_query_count(struct rte_eth_dev *dev,
		      struct rte_flow *flow, void *data,
		      struct rte_flow_error *error)
	struct enic *enic = pmd_priv(dev);
	struct rte_flow_query_count *query;
	uint64_t packets, bytes;
	if (flow->counter_idx == -1) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  "flow does not have counter");
	query = (struct rte_flow_query_count *)data;
	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
				    !!query->reset, &packets, &bytes)) {
		return rte_flow_error_set
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			"cannot read counter");
	query->hits_set = 1;
	query->bytes_set = 1;
	query->hits = packets;
	query->bytes = bytes;
enic_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		struct rte_flow_error *error)
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_flow_query_count(dev, flow, data, error);
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  "action not supported");
 * Flow callback registration.
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
	.query = enic_flow_query,
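/*
 * Usage note (illustrative): applications reach these callbacks through the
 * generic flow API, e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */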