/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_memzone.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP6_VTC_FLOW 0x60000000

/* Highest Item type supported by Flowman */
#define FM_MAX_ITEM_TYPE RTE_FLOW_ITEM_TYPE_VXLAN

/* Up to 1024 TCAM entries */
#define FM_MAX_TCAM_TABLE_SIZE 1024

/* Up to 4096 entries per exact match table */
#define FM_MAX_EXACT_TABLE_SIZE 4096

/* Number of counters to allocate each time the counter pool is expanded */
#define FM_COUNTERS_EXPAND  100

#define FM_INVALID_HANDLE 0

/*
 * Flow exact match tables (FET) in the VIC and rte_flow groups.
 * Use a simple scheme to map groups to tables.
 * Group 0 uses the TCAM tables, one for each direction.
 * Groups 1, 2, ... each use their own exact match table.
 *
 * The TCAM tables are allocated upfront during init.
 *
 * Exact match tables are allocated on demand. Three paths lead to
 * allocations.
 *
 * 1. Add a flow that jumps from group 0 to group N.
 *
 * If N does not exist, we allocate an exact match table for it, using
 * a dummy key. A key is required for the table.
 *
 * 2. Add a flow that uses group N.
 *
 * If N does not exist, we allocate an exact match table for it, using
 * the flow's key. Subsequent flows to the same group all should have
 * the same key.
 *
 * Without a jump flow to N, N is not reachable in hardware. No packets
 * reach N and match.
 *
 * 3. Add a flow to an empty group N.
 *
 * N has been created via (1) and the dummy key. We free that table and
 * allocate a new one using the new flow's key. We also redo the existing
 * jump flow to point to the new table.
 */
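
/*
 * Illustrative sketch only (not used by the driver): with this mapping, an
 * application reaches group N by installing a group-0 rule whose action
 * jumps to N. The structures below are the standard rte_flow API; the
 * group numbers and the ENIC_FM_DOC_EXAMPLES guard are hypothetical.
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static const struct rte_flow_attr example_group0_attr = {
	.group = 0,	/* group 0 -> TCAM table */
	.ingress = 1,
};
static const struct rte_flow_action_jump example_jump_to_group1 = {
	.group = 1,	/* group 1 -> its own exact match table */
};
static const struct rte_flow_action example_jump_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_JUMP,
	  .conf = &example_jump_to_group1 },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif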

#define FM_TCAM_RTE_GROUP 0

struct enic_fm_fet {
	TAILQ_ENTRY(enic_fm_fet) list;
	uint32_t group; /* rte_flow group ID */
	uint64_t handle; /* Exact match table handle from flowman */
	uint8_t ingress;
	uint8_t default_key;
	int ref; /* Reference count via get/put */
	struct fm_key_template key; /* Key associated with the table */
};

struct enic_fm_counter {
	SLIST_ENTRY(enic_fm_counter) next;
	uint32_t handle;
};

/* rte_flow.fm */
struct enic_fm_flow {
	bool counter_valid;
	uint64_t entry_handle;
	uint64_t action_handle;
	struct enic_fm_counter *counter;
	struct enic_fm_fet *fet;
};

struct enic_fm_jump_flow {
	TAILQ_ENTRY(enic_fm_jump_flow) list;
	struct rte_flow *flow;
	uint32_t group;
	struct fm_tcam_match_entry match;
	struct fm_action action;
};

/*
 * Flowman uses host memory for commands. This structure is allocated
 * in DMA-able memory.
 */
union enic_flowman_cmd_mem {
	struct fm_tcam_match_table fm_tcam_match_table;
	struct fm_exact_match_table fm_exact_match_table;
	struct fm_tcam_match_entry fm_tcam_match_entry;
	struct fm_exact_match_entry fm_exact_match_entry;
	struct fm_action fm_action;
};

struct enic_flowman {
	struct enic *enic;
	/* Command buffer */
	struct {
		union enic_flowman_cmd_mem *va;
		dma_addr_t pa;
	} cmd;
	/* TCAM tables allocated upfront, used for group 0 */
	uint64_t ig_tcam_hndl;
	uint64_t eg_tcam_hndl;
	/* Counters */
	SLIST_HEAD(enic_free_counters, enic_fm_counter) counters;
	void *counter_stack;
	uint32_t counters_alloced;
	/* Exact match tables for groups != 0, dynamically allocated */
	TAILQ_HEAD(fet_list, enic_fm_fet) fet_list;
	/*
	 * Default exact match tables used for jump actions to
	 * non-existent groups.
	 */
	struct enic_fm_fet *default_eg_fet;
	struct enic_fm_fet *default_ig_fet;
	/* Flows that jump to the default table above */
	TAILQ_HEAD(jump_flow_list, enic_fm_jump_flow) jump_list;
	/*
	 * Scratch data used during each invocation of flow_create
	 * and flow_validate.
	 */
	struct enic_fm_fet *fet;
	struct fm_tcam_match_entry tcam_entry;
	struct fm_action action;
	struct fm_action action_tmp; /* enic_fm_reorder_action_op */
	int action_op_count;
};

static int enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle);

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * fm_tcam_entry: Flowman TCAM match entry.
 * header_level: 0 for outer header, 1 for inner header.
 */
struct copy_item_args {
	const struct rte_flow_item *item;
	struct fm_tcam_match_entry *fm_tcam_entry;
	uint8_t header_level;
};

/* functions for copying items into flowman match */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);

/* Info about how to copy items into flowman match */
struct enic_fm_items {
	/* Function for copying and validating an item. */
	enic_copy_item_fn * const copy_item;
	/* List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/*
	 * True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const uint8_t valid_start_item;
};

static enic_copy_item_fn enic_fm_copy_item_eth;
static enic_copy_item_fn enic_fm_copy_item_ipv4;
static enic_copy_item_fn enic_fm_copy_item_ipv6;
static enic_copy_item_fn enic_fm_copy_item_raw;
static enic_copy_item_fn enic_fm_copy_item_sctp;
static enic_copy_item_fn enic_fm_copy_item_tcp;
static enic_copy_item_fn enic_fm_copy_item_udp;
static enic_copy_item_fn enic_fm_copy_item_vlan;
static enic_copy_item_fn enic_fm_copy_item_vxlan;

/* Ingress actions */
static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = {
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_JUMP,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_VOID,
	RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
	RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
	RTE_FLOW_ACTION_TYPE_END, /* END must be the last entry */
};

/* Egress actions */
static const enum rte_flow_action_type enic_fm_supported_eg_actions[] = {
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_JUMP,
	RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
	RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
	RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_VOID,
	RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
	RTE_FLOW_ACTION_TYPE_END,
};

static const struct enic_fm_items enic_fm_items[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_fm_copy_item_raw,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_fm_copy_item_eth,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_fm_copy_item_vlan,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_fm_copy_item_ipv4,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_fm_copy_item_ipv6,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_fm_copy_item_udp,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_fm_copy_item_tcp,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_fm_copy_item_sctp,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_fm_copy_item_vxlan,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
};

static int
enic_fm_copy_item_eth(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	/* Match all if no spec */
	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_header_select |= FKH_ETHER;
	fm_mask->fk_header_select |= FKH_ETHER;
	memcpy(&fm_data->l2.eth, spec, sizeof(*spec));
	memcpy(&fm_mask->l2.eth, mask, sizeof(*mask));
	return 0;
}

static int
enic_fm_copy_item_vlan(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;
	struct rte_ether_hdr *eth_mask;
	struct rte_ether_hdr *eth_val;
	uint32_t meta;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	/* Outer and inner packet vlans need different flags */
	meta = FKM_VLAN_PRES;
	if (lvl > 0)
		meta = FKM_QTAG;
	fm_data->fk_metadata |= meta;
	fm_mask->fk_metadata |= meta;

	/* Match all if no spec */
	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_vlan_mask;

	eth_mask = (void *)&fm_mask->l2.eth;
	eth_val = (void *)&fm_data->l2.eth;

	/* Outer TPID cannot be matched */
	if (eth_mask->ether_type)
		return -ENOTSUP;

	/*
	 * When packet matching, the VIC always compares vlan-stripped
	 * L2, regardless of vlan stripping settings. So, the inner type
	 * from vlan becomes the ether type of the eth header.
	 */
	eth_mask->ether_type = mask->inner_type;
	eth_val->ether_type = spec->inner_type;
	fm_data->fk_header_select |= FKH_ETHER | FKH_QTAG;
	fm_mask->fk_header_select |= FKH_ETHER | FKH_QTAG;
	fm_data->fk_vlan = rte_be_to_cpu_16(spec->tci);
	fm_mask->fk_vlan = rte_be_to_cpu_16(mask->tci);
	return 0;
}

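/*
 * Illustrative sketch only: because the VIC matches against vlan-stripped
 * L2, a VLAN item's inner_type takes the place of the ethertype, as in this
 * hypothetical spec/mask pair (standard rte_flow structures).
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static const struct rte_flow_item_vlan example_vlan_spec = {
	.tci = RTE_BE16(100),				/* VLAN ID 100 */
	.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),	/* becomes ethertype */
};
static const struct rte_flow_item_vlan example_vlan_mask = {
	.tci = RTE_BE16(0x0fff),
	.inner_type = RTE_BE16(0xffff),
};
#endif
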
static int
enic_fm_copy_item_ipv4(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_IPV4;
	fm_mask->fk_metadata |= FKM_IPV4;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	fm_data->fk_header_select |= FKH_IPV4;
	fm_mask->fk_header_select |= FKH_IPV4;
	memcpy(&fm_data->l3.ip4, spec, sizeof(*spec));
	memcpy(&fm_mask->l3.ip4, mask, sizeof(*mask));
	return 0;
}

static int
enic_fm_copy_item_ipv6(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_IPV6;
	fm_mask->fk_metadata |= FKM_IPV6;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	fm_data->fk_header_select |= FKH_IPV6;
	fm_mask->fk_header_select |= FKH_IPV6;
	memcpy(&fm_data->l3.ip6, spec, sizeof(*spec));
	memcpy(&fm_mask->l3.ip6, mask, sizeof(*mask));
	return 0;
}

static int
enic_fm_copy_item_udp(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_UDP;
	fm_mask->fk_metadata |= FKM_UDP;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_udp_mask;

	fm_data->fk_header_select |= FKH_UDP;
	fm_mask->fk_header_select |= FKH_UDP;
	memcpy(&fm_data->l4.udp, spec, sizeof(*spec));
	memcpy(&fm_mask->l4.udp, mask, sizeof(*mask));
	return 0;
}

static int
enic_fm_copy_item_tcp(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_TCP;
	fm_mask->fk_metadata |= FKM_TCP;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	fm_data->fk_header_select |= FKH_TCP;
	fm_mask->fk_header_select |= FKH_TCP;
	memcpy(&fm_data->l4.tcp, spec, sizeof(*spec));
	memcpy(&fm_mask->l4.tcp, mask, sizeof(*mask));
	return 0;
}

static int
enic_fm_copy_item_sctp(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;
	uint32_t l3_fkh;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly
	 * set the protocol number in the IP pattern.
	 */
	if (fm_data->fk_metadata & FKM_IPV4) {
		struct rte_ipv4_hdr *ip;
		ip = (struct rte_ipv4_hdr *)&fm_mask->l3.ip4;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct rte_ipv4_hdr *)&fm_data->l3.ip4;
		ip_proto = &ip->next_proto_id;
		l3_fkh = FKH_IPV4;
	} else if (fm_data->fk_metadata & FKM_IPV6) {
		struct rte_ipv6_hdr *ip;
		ip = (struct rte_ipv6_hdr *)&fm_mask->l3.ip6;
		ip_proto_mask = &ip->proto;
		ip = (struct rte_ipv6_hdr *)&fm_data->l3.ip6;
		ip_proto = &ip->proto;
		l3_fkh = FKH_IPV6;
	} else {
		/* Need IPv4/IPv6 pattern first */
		return -EINVAL;
	}
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;
	fm_data->fk_header_select |= l3_fkh;
	fm_mask->fk_header_select |= l3_fkh;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	fm_data->fk_header_select |= FKH_L4RAW;
	fm_mask->fk_header_select |= FKH_L4RAW;
	memcpy(fm_data->l4.rawdata, spec, sizeof(*spec));
	memcpy(fm_mask->l4.rawdata, mask, sizeof(*mask));
	return 0;
}

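/*
 * Illustrative sketch only: since SCTP is matched via the IP protocol
 * number, an SCTP item must be preceded by IPv4 or IPv6, e.g. this
 * hypothetical pattern (specs/masks omitted for brevity).
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static const struct rte_flow_item example_sctp_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* required before SCTP */
	{ .type = RTE_FLOW_ITEM_TYPE_SCTP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif
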
static int
enic_fm_copy_item_vxlan(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	/* Only 2 header levels (outer and inner) allowed */
	if (arg->header_level > 0)
		return -EINVAL;

	fm_data = &entry->ftm_data.fk_hdrset[0];
	fm_mask = &entry->ftm_mask.fk_hdrset[0];
	fm_data->fk_metadata |= FKM_VXLAN;
	fm_mask->fk_metadata |= FKM_VXLAN;
	/* items from here on out are inner header items */
	arg->header_level = 1;

	/* Match all if no spec */
	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	fm_data->fk_header_select |= FKH_VXLAN;
	fm_mask->fk_header_select |= FKH_VXLAN;
	memcpy(&fm_data->vxlan, spec, sizeof(*spec));
	memcpy(&fm_mask->vxlan, mask, sizeof(*mask));
	return 0;
}

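/*
 * Illustrative sketch only: items after VXLAN are copied to header level 1
 * (the inner header), so a two-level pattern looks like this hypothetical
 * example.
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static const struct rte_flow_item example_vxlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer, level 0 */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* switches to level 1 */
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner, level 1 */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif
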
/*
 * Currently, raw pattern match is very limited. It is intended for matching
 * UDP tunnel header (e.g. vxlan or geneve).
 */
static int
enic_fm_copy_item_raw(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_raw *spec = item->spec;
	const struct rte_flow_item_raw *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	/* Cannot be used for inner packet */
	if (lvl > 0)
		return -EINVAL;
	/* Need both spec and mask */
	if (!spec || !mask)
		return -EINVAL;
	/* Only supports relative with offset 0 */
	if (!spec->relative || spec->offset != 0 || spec->search ||
	    spec->limit)
		return -EINVAL;
	/* Need non-null pattern that fits within the NIC's filter pattern */
	if (spec->length == 0 ||
	    spec->length + sizeof(struct rte_udp_hdr) > FM_LAYER_SIZE ||
	    !spec->pattern || !mask->pattern)
		return -EINVAL;
	/*
	 * Mask fields, including length, are often set to zero. Assume that
	 * means "same as spec" to avoid breaking existing apps. If length
	 * is not zero, then it should be >= spec length.
	 *
	 * No more pattern follows this, so append to the L4 layer instead of
	 * L5 to work with both recent and older VICs.
	 */
	if (mask->length != 0 && mask->length < spec->length)
		return -EINVAL;

	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_header_select |= FKH_L4RAW;
	fm_mask->fk_header_select |= FKH_L4RAW;
	fm_data->fk_header_select &= ~FKH_UDP;
	fm_mask->fk_header_select &= ~FKH_UDP;
	memcpy(fm_data->l4.rawdata + sizeof(struct rte_udp_hdr),
	       spec->pattern, spec->length);
	memcpy(fm_mask->l4.rawdata + sizeof(struct rte_udp_hdr),
	       mask->pattern, spec->length);
	return 0;
}

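/*
 * Illustrative sketch only: the RAW item must be relative, offset 0, and
 * immediately follow UDP, matching bytes right after the UDP header. The
 * 8-byte tunnel header below is hypothetical.
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static const uint8_t example_tunnel_hdr[8] = { 0x08, 0x00, 0x00, 0x00,
					       0x00, 0x00, 0x01, 0x00 };
static const struct rte_flow_item_raw example_raw_spec = {
	.relative = 1,		/* must be relative */
	.offset = 0,		/* ... with offset 0 */
	.length = sizeof(example_tunnel_hdr),
	.pattern = example_tunnel_hdr,
};
#endif
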
static int
enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress,
	       struct fm_key_template *key, int entries,
	       struct enic_fm_fet **fet_out)
{
	struct fm_exact_match_table *cmd;
	struct fm_header_set *hdr;
	struct enic_fm_fet *fet;
	uint64_t args[2];
	int ret;

	ENICPMD_FUNC_TRACE();
	fet = calloc(1, sizeof(struct enic_fm_fet));
	if (fet == NULL)
		return -ENOMEM;
	cmd = &fm->cmd.va->fm_exact_match_table;
	memset(cmd, 0, sizeof(*cmd));
	cmd->fet_direction = ingress ? FM_INGRESS : FM_EGRESS;
	cmd->fet_stage = FM_STAGE_LAST;
	cmd->fet_max_entries = entries ? entries : FM_MAX_EXACT_TABLE_SIZE;
	if (key == NULL) {
		hdr = &cmd->fet_key.fk_hdrset[0];
		memset(hdr, 0, sizeof(*hdr));
		hdr->fk_header_select = FKH_IPV4 | FKH_UDP;
		hdr->l3.ip4.fk_saddr = 0xFFFFFFFF;
		hdr->l3.ip4.fk_daddr = 0xFFFFFFFF;
		hdr->l4.udp.fk_source = 0xFFFF;
		hdr->l4.udp.fk_dest = 0xFFFF;
		fet->default_key = 1;
	} else {
		memcpy(&cmd->fet_key, key, sizeof(*key));
		memcpy(&fet->key, key, sizeof(*key));
		fet->default_key = 0;
	}
	cmd->fet_key.fk_packet_tag = 1;

	args[0] = FM_EXACT_TABLE_ALLOC;
	args[1] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (ret) {
		ENICPMD_LOG(ERR, "cannot alloc exact match table: rc=%d", ret);
		free(fet);
		return ret;
	}
	fet->handle = args[0];
	fet->ingress = ingress;
	ENICPMD_LOG(DEBUG, "allocated exact match table: handle=0x%" PRIx64,
		    fet->handle);
	*fet_out = fet;
	return 0;
}

static void
enic_fet_free(struct enic_flowman *fm, struct enic_fm_fet *fet)
{
	ENICPMD_FUNC_TRACE();
	enic_fm_tbl_free(fm, fet->handle);
	if (!fet->default_key)
		TAILQ_REMOVE(&fm->fet_list, fet, list);
	free(fet);
}

/*
 * Get the exact match table for the given combination of
 * <group, ingress, key>. Allocate one on the fly as necessary.
 */
static int
enic_fet_get(struct enic_flowman *fm,
	     uint32_t group,
	     uint8_t ingress,
	     struct fm_key_template *key,
	     struct enic_fm_fet **fet_out,
	     struct rte_flow_error *error)
{
	struct enic_fm_fet *fet;

	ENICPMD_FUNC_TRACE();
	/* See if we already have this table open */
	TAILQ_FOREACH(fet, &fm->fet_list, list) {
		if (fet->group == group && fet->ingress == ingress)
			goto found;
	}
	/* Jumping to a non-existent group? Use the default table */
	if (key == NULL) {
		fet = ingress ? fm->default_ig_fet : fm->default_eg_fet;
	} else if (enic_fet_alloc(fm, ingress, key, 0, &fet)) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "enic: cannot get exact match table");
	}
	fet->group = group;
	/* Default table is never on the open table list */
	if (!fet->default_key)
		TAILQ_INSERT_HEAD(&fm->fet_list, fet, list);
found:
	fet->ref++;
	*fet_out = fet;
	ENICPMD_LOG(DEBUG, "fet_get: %s %s group=%u ref=%u",
		    fet->default_key ? "default" : "",
		    fet->ingress ? "ingress" : "egress",
		    fet->group, fet->ref);
	return 0;
}

static void
enic_fet_put(struct enic_flowman *fm, struct enic_fm_fet *fet)
{
	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(fet->ref > 0);
	fet->ref--;
	ENICPMD_LOG(DEBUG, "fet_put: %s %s group=%u ref=%u",
		    fet->default_key ? "default" : "",
		    fet->ingress ? "ingress" : "egress",
		    fet->group, fet->ref);
	if (fet->ref == 0)
		enic_fet_free(fm, fet);
}

/* Return 1 if current item is valid on top of the previous one. */
static int
fm_item_stacking_valid(enum rte_flow_item_type prev_item,
		       const struct enic_fm_items *item_info,
		       uint8_t is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	ENICPMD_FUNC_TRACE();
	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}
	/* This is the first item in the stack. Check if that's cool */
	if (is_first_item && item_info->valid_start_item)
		return 1;
	return 0;
}

/*
 * Build the flow manager match entry structure from the provided pattern.
 * The pattern is validated as the items are copied.
 */
static int
enic_fm_copy_entry(struct enic_flowman *fm,
		   const struct rte_flow_item pattern[],
		   struct rte_flow_error *error)
{
	const struct enic_fm_items *item_info;
	enum rte_flow_item_type prev_item;
	const struct rte_flow_item *item;
	struct copy_item_args args;
	uint8_t prev_header_level;
	uint8_t is_first_item;
	int ret;

	ENICPMD_FUNC_TRACE();
	item = pattern;
	is_first_item = 1;
	prev_item = RTE_FLOW_ITEM_TYPE_END;

	args.fm_tcam_entry = &fm->tcam_entry;
	args.header_level = 0;
	prev_header_level = 0;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/*
		 * Get info about how to validate and copy the item. If NULL
		 * is returned, the NIC does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &enic_fm_items[item->type];

		if (item->type > FM_MAX_ITEM_TYPE ||
		    item_info->copy_item == NULL) {
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "enic: unsupported item");
		}

		/* check to see if item stacking is valid */
		if (!fm_item_stacking_valid(prev_item, item_info,
					    is_first_item))
			goto stacking_error;

		args.item = item;
		ret = item_info->copy_item(&args);
		if (ret)
			goto item_not_supported;
		/* Going from outer to inner? Treat it as a new packet start */
		if (prev_header_level != args.header_level) {
			prev_item = RTE_FLOW_ITEM_TYPE_END;
			is_first_item = 1;
		} else {
			prev_item = item->type;
			is_first_item = 0;
		}
		prev_header_level = args.header_level;
	}
	return 0;

item_not_supported:
	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
				  NULL, "enic: unsupported item type");

stacking_error:
	return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, "enic: unsupported item stack");
}

static void
flow_item_skip_void(const struct rte_flow_item **item)
{
	for ( ; ; (*item)++)
		if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
			return;
}

static void
append_template(void **template, uint8_t *off, const void *data, int len)
{
	memcpy(*template, data, len);
	*template = (char *)*template + len;
	*off = *off + len;
}

static int
enic_fm_append_action_op(struct enic_flowman *fm,
			 struct fm_action_op *fm_op,
			 struct rte_flow_error *error)
{
	int count;

	count = fm->action_op_count;
	ENICPMD_LOG(DEBUG, "append action op: idx=%d op=%u",
		    count, fm_op->fa_op);
	if (count == FM_ACTION_OP_MAX) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"too many action operations");
	}
	fm->action.fma_action_ops[count] = *fm_op;
	fm->action_op_count = count + 1;
	return 0;
}

/*
 * NIC requires that the first steer appear before decap.
 * Correct example: steer, decap, steer, steer, ...
 */
static void
enic_fm_reorder_action_op(struct enic_flowman *fm)
{
	struct fm_action_op *op, *steer, *decap;
	struct fm_action_op tmp_op;

	ENICPMD_FUNC_TRACE();
	/* Find the first steer and decap */
	op = fm->action.fma_action_ops;
	steer = NULL;
	decap = NULL;
	while (op->fa_op != FMOP_END) {
		if (!decap && op->fa_op == FMOP_DECAP_NOSTRIP)
			decap = op;
		else if (!steer && op->fa_op == FMOP_RQ_STEER)
			steer = op;
		op++;
	}
	/* If decap is before steer, swap */
	if (steer && decap && decap < steer) {
		op = fm->action.fma_action_ops;
		ENICPMD_LOG(DEBUG, "swap decap %ld <-> steer %ld",
			    (long)(decap - op), (long)(steer - op));
		tmp_op = *decap;
		*decap = *steer;
		*steer = tmp_op;
	}
}

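/*
 * Illustrative sketch only: a hypothetical action list that triggers the
 * reorder above. The ops are emitted as decap followed by steer, then
 * swapped so the steer comes first, as the NIC requires.
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static const struct rte_flow_action_queue example_queue_conf = { .index = 0 };
static const struct rte_flow_action example_decap_then_queue[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },	/* FMOP_DECAP_NOSTRIP */
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,		/* FMOP_RQ_STEER */
	  .conf = &example_queue_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif
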
/* VXLAN decap is done via flowman compound action */
static int
enic_fm_copy_vxlan_decap(struct enic_flowman *fm,
			 struct fm_tcam_match_entry *fmt,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	struct fm_header_set *fm_data;
	struct fm_action_op fm_op;

	ENICPMD_FUNC_TRACE();
	fm_data = &fmt->ftm_data.fk_hdrset[0];
	if (!(fm_data->fk_metadata & FKM_VXLAN)) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"vxlan-decap: vxlan must be in pattern");
	}

	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_DECAP_NOSTRIP;
	return enic_fm_append_action_op(fm, &fm_op, error);
}

/* VXLAN encap is done via flowman compound action */
static int
enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
			 const struct rte_flow_item *item,
			 struct rte_flow_error *error)
{
	struct fm_action_op fm_op;
	struct rte_ether_hdr *eth;
	uint16_t *ethertype;
	void *template;
	uint8_t off;

	ENICPMD_FUNC_TRACE();
	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_ENCAP;
	template = fm->action.fma_data;
	off = 0;
	/*
	 * Copy flow items to the flowman template starting L2.
	 * L2 must be ethernet.
	 */
	flow_item_skip_void(&item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: first item should be ethernet");
	eth = (struct rte_ether_hdr *)template;
	ethertype = &eth->ether_type;
	append_template(&template, &off, item->spec,
			sizeof(struct rte_flow_item_eth));
	item++;
	flow_item_skip_void(&item);

	/* Optional VLAN */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		const struct rte_flow_item_vlan *spec;

		ENICPMD_LOG(DEBUG, "vxlan-encap: vlan");
		spec = item->spec;
		fm_op.encap.outer_vlan = rte_be_to_cpu_16(spec->tci);
		item++;
		flow_item_skip_void(&item);
	}

	/* L3 must be IPv4 or IPv6 */
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
	{
		struct rte_ipv4_hdr *ip4;

		ENICPMD_LOG(DEBUG, "vxlan-encap: ipv4");
		*ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		ip4 = (struct rte_ipv4_hdr *)template;
		/*
		 * Offset of IPv4 length field and its initial value
		 * (IP + UDP + VXLAN) are specified in the action. The NIC
		 * will add the inner packet length.
		 */
		fm_op.encap.len1_offset = off +
			offsetof(struct rte_ipv4_hdr, total_length);
		fm_op.encap.len1_delta = sizeof(struct rte_ipv4_hdr) +
			sizeof(struct rte_udp_hdr) +
			sizeof(struct rte_vxlan_hdr);
		append_template(&template, &off, item->spec,
				sizeof(struct rte_ipv4_hdr));
		ip4->version_ihl = RTE_IPV4_VHL_DEF;
		if (ip4->time_to_live == 0)
			ip4->time_to_live = IP_DEFTTL;
		ip4->next_proto_id = IPPROTO_UDP;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_IPV6:
	{
		struct rte_ipv6_hdr *ip6;

		ENICPMD_LOG(DEBUG, "vxlan-encap: ipv6");
		*ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		ip6 = (struct rte_ipv6_hdr *)template;
		fm_op.encap.len1_offset = off +
			offsetof(struct rte_ipv6_hdr, payload_len);
		fm_op.encap.len1_delta = sizeof(struct rte_udp_hdr) +
			sizeof(struct rte_vxlan_hdr);
		append_template(&template, &off, item->spec,
				sizeof(struct rte_ipv6_hdr));
		ip6->vtc_flow |= rte_cpu_to_be_32(IP6_VTC_FLOW);
		if (ip6->hop_limits == 0)
			ip6->hop_limits = IP_DEFTTL;
		ip6->proto = IPPROTO_UDP;
		break;
	}
	default:
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: L3 must be IPv4/IPv6");
	}
	item++;
	flow_item_skip_void(&item);

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: UDP must follow IPv4/IPv6");
	/* UDP length = UDP + VXLAN. NIC will add the inner packet length. */
	fm_op.encap.len2_offset =
		off + offsetof(struct rte_udp_hdr, dgram_len);
	fm_op.encap.len2_delta =
		sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr);
	append_template(&template, &off, item->spec,
			sizeof(struct rte_udp_hdr));
	item++;
	flow_item_skip_void(&item);

	/* Last, the VXLAN header itself */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN)
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: VXLAN must follow UDP");
	append_template(&template, &off, item->spec,
			sizeof(struct rte_flow_item_vxlan));

	/*
	 * Fill in the rest of the action structure.
	 * Indicate that we want to encap with vxlan at packet start.
	 */
	fm_op.encap.template_offset = 0;
	fm_op.encap.template_len = off;
	return enic_fm_append_action_op(fm, &fm_op, error);
}

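/*
 * Illustrative sketch only: the encap definition handed to the function
 * above must be ETH / [VLAN] / IPv4-or-IPv6 / UDP / VXLAN. This
 * hypothetical shape omits the specs, which a real application must
 * provide, since the driver copies them into the template.
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static struct rte_flow_item example_encap_definition[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* .spec required */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* or IPV6; .spec required */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* .spec required */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* .spec required */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_vxlan_encap example_encap_conf = {
	.definition = example_encap_definition,
};
#endif
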
static int
enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr,
		  uint64_t *handle)
{
	uint32_t bdf;
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	ENICPMD_LOG(DEBUG, "bdf=%x:%x:%x", addr->bus, addr->devid,
		    addr->function);
	bdf = addr->bus << 8 | addr->devid << 3 | addr->function;
	args[0] = FM_VNIC_FIND;
	args[1] = bdf;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc != 0) {
		ENICPMD_LOG(ERR, "cannot find vnic: rc=%d", rc);
		return rc;
	}
	*handle = args[0];
	ENICPMD_LOG(DEBUG, "found vnic: handle=0x%" PRIx64, *handle);
	return 0;
}

/* Translate flow actions to flowman TCAM entry actions */
static int
enic_fm_copy_action(struct enic_flowman *fm,
		    const struct rte_flow_action actions[],
		    uint8_t ingress,
		    struct rte_flow_error *error)
{
	enum {
		FATE = 1 << 0,
		DECAP = 1 << 1,
		PASSTHRU = 1 << 2,
		COUNT = 1 << 3,
		ENCAP = 1 << 4,
		PUSH_VLAN = 1 << 5,
	};
	struct fm_tcam_match_entry *fmt;
	struct fm_action_op fm_op;
	bool need_ovlan_action;
	struct enic *enic;
	uint32_t overlap;
	uint64_t vnic_h;
	uint16_t ovlan;
	bool first_rq;
	int ret;

	ENICPMD_FUNC_TRACE();
	fmt = &fm->tcam_entry;
	need_ovlan_action = false;
	ovlan = 0;
	first_rq = true;
	enic = fm->enic;
	overlap = 0;
	vnic_h = 0; /* 0 = current vNIC */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			if (overlap & PASSTHRU)
				goto unsupported;
			overlap |= PASSTHRU;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_JUMP: {
			const struct rte_flow_action_jump *jump =
				actions->conf;
			struct enic_fm_fet *fet;

			if (overlap & FATE)
				goto unsupported;
			ret = enic_fet_get(fm, jump->group, ingress, NULL,
					   &fet, error);
			if (ret)
				return ret;
			overlap |= FATE;
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_EXACT_MATCH;
			fm_op.exact.handle = fet->handle;
			fm->fet = fet;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				actions->conf;

			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "invalid mark id");
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_MARK;
			fm_op.mark.mark = mark->id + 1;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_MARK;
			fm_op.mark.mark = ENIC_MAGIC_FILTER_ID;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				actions->conf;

			/*
			 * If fate other than QUEUE or RSS, fail. Multiple
			 * rss and queue actions are ok.
			 */
			if ((overlap & FATE) && first_rq)
				goto unsupported;
			first_rq = false;
			overlap |= FATE;
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_RQ_STEER;
			fm_op.rq_steer.rq_index =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			fm_op.rq_steer.rq_count = 1;
			fm_op.rq_steer.vnic_handle = vnic_h;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
				    fm_op.rq_steer.rq_index);
			break;
		}
		case RTE_FLOW_ACTION_TYPE_DROP: {
			if (overlap & FATE)
				goto unsupported;
			overlap |= FATE;
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_DROP;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			ENICPMD_LOG(DEBUG, "create DROP action");
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			if (overlap & COUNT)
				goto unsupported;
			overlap |= COUNT;
			/* Count is associated with entry not action on VIC. */
			fmt->ftm_flags |= FMEF_COUNTER;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss = actions->conf;
			bool allow;
			uint16_t i;

			/*
			 * If fate other than QUEUE or RSS, fail. Multiple
			 * rss and queue actions are ok.
			 */
			if ((overlap & FATE) && first_rq)
				goto unsupported;
			first_rq = false;
			overlap |= FATE;

			/*
			 * Hardware only supports RSS actions on outer level
			 * with default type and function. Queues must be
			 * sequential.
			 */
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->level == 0 && (rss->types == 0 ||
				rss->types == enic->rss_hf) &&
				rss->queue_num <= enic->rq_count &&
				rss->queue[rss->queue_num - 1] < enic->rq_count;

			/* Identity queue map needs to be sequential */
			for (i = 1; i < rss->queue_num; i++)
				allow = allow && (rss->queue[i] ==
					rss->queue[i - 1] + 1);
			if (!allow)
				goto unsupported;

			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_RQ_STEER;
			fm_op.rq_steer.rq_index =
				enic_rte_rq_idx_to_sop_idx(rss->queue[0]);
			fm_op.rq_steer.rq_count = rss->queue_num;
			fm_op.rq_steer.vnic_handle = vnic_h;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
				    fm_op.rq_steer.rq_index);
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID: {
			const struct rte_flow_action_port_id *port;
			struct rte_pci_device *pdev;
			struct rte_eth_dev *dev;

			port = actions->conf;
			if (port->original) {
				vnic_h = 0; /* This port */
				break;
			}
			ENICPMD_LOG(DEBUG, "port id %u", port->id);
			if (!rte_eth_dev_is_valid_port(port->id)) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "invalid port_id");
			}
			dev = &rte_eth_devices[port->id];
			if (!dev_is_enic(dev)) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "port_id is not enic");
			}
			pdev = RTE_ETH_DEV_TO_PCI(dev);
			if (enic_fm_find_vnic(enic, &pdev->addr, &vnic_h)) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "port_id is not vnic");
			}
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: {
			if (overlap & DECAP)
				goto unsupported;
			overlap |= DECAP;

			ret = enic_fm_copy_vxlan_decap(fm, fmt, actions,
				error);
			if (ret != 0)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: {
			const struct rte_flow_action_vxlan_encap *encap;

			encap = actions->conf;
			if (overlap & ENCAP)
				goto unsupported;
			overlap |= ENCAP;
			ret = enic_fm_copy_vxlan_encap(fm, encap->definition,
				error);
			if (ret != 0)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: {
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_POP_VLAN;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {
			const struct rte_flow_action_of_push_vlan *vlan;

			if (overlap & PASSTHRU)
				goto unsupported;
			vlan = actions->conf;
			if (vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN)) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "unexpected push_vlan ethertype");
			}
			overlap |= PUSH_VLAN;
			need_ovlan_action = true;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: {
			const struct rte_flow_action_of_set_vlan_pcp *pcp;

			pcp = actions->conf;
			if (pcp->vlan_pcp > 7) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "invalid vlan_pcp");
			}
			need_ovlan_action = true;
			ovlan |= ((uint16_t)pcp->vlan_pcp) << 13;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: {
			const struct rte_flow_action_of_set_vlan_vid *vid;

			vid = actions->conf;
			need_ovlan_action = true;
			ovlan |= rte_be_to_cpu_16(vid->vlan_vid);
			break;
		}
		default:
			goto unsupported;
		}
	}

	if (!(overlap & (FATE | PASSTHRU | COUNT)))
		goto unsupported;
	if (need_ovlan_action) {
		memset(&fm_op, 0, sizeof(fm_op));
		fm_op.fa_op = FMOP_SET_OVLAN;
		fm_op.ovlan.vlan = ovlan;
		ret = enic_fm_append_action_op(fm, &fm_op, error);
		if (ret)
			return ret;
	}
	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_END;
	ret = enic_fm_append_action_op(fm, &fm_op, error);
	if (ret)
		return ret;
	enic_fm_reorder_action_op(fm);
	return 0;

unsupported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  NULL, "enic: unsupported action");
}

/** Check if the action is supported */
static int
enic_fm_match_action(const struct rte_flow_action *action,
		     const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
{
	/* Manually keep in sync with FMOP commands */
	const char *fmop_str[FMOP_OP_MAX] = {
		[FMOP_END] = "end",
		[FMOP_DROP] = "drop",
		[FMOP_RQ_STEER] = "steer",
		[FMOP_EXACT_MATCH] = "exmatch",
		[FMOP_MARK] = "mark",
		[FMOP_EXT_MARK] = "ext_mark",
		[FMOP_TAG] = "tag",
		[FMOP_EG_HAIRPIN] = "eg_hairpin",
		[FMOP_IG_HAIRPIN] = "ig_hairpin",
		[FMOP_ENCAP_IVLAN] = "encap_ivlan",
		[FMOP_ENCAP_NOIVLAN] = "encap_noivlan",
		[FMOP_ENCAP] = "encap",
		[FMOP_SET_OVLAN] = "set_ovlan",
		[FMOP_DECAP_NOSTRIP] = "decap_nostrip",
		[FMOP_DECAP_STRIP] = "decap_strip",
		[FMOP_POP_VLAN] = "pop_vlan",
		[FMOP_SET_EGPORT] = "set_egport",
		[FMOP_RQ_STEER_ONLY] = "rq_steer_only",
		[FMOP_SET_ENCAP_VLAN] = "set_encap_vlan",
		[FMOP_EMIT] = "emit",
		[FMOP_MODIFY] = "modify",
	};
	const struct fm_action_op *op = &fm_action->fma_action_ops[0];
	char buf[128], *bp = buf;
	const char *op_str;
	int i, n, buf_len;

	buf[0] = '\0';
	buf_len = sizeof(buf);
	for (i = 0; i < FM_ACTION_OP_MAX; i++) {
		if (op->fa_op == FMOP_END)
			break;
		if (op->fa_op >= FMOP_OP_MAX)
			op_str = "unknown";
		else
			op_str = fmop_str[op->fa_op];
		n = snprintf(bp, buf_len, "%s,", op_str);
		if (n > 0 && n < buf_len) {
			bp += n;
			buf_len -= n;
		}
		op++;
	}
	/* Remove trailing comma */
	if (buf[0])
		*(bp - 1) = '\0';
	ENICPMD_LOG(DEBUG, " Actions: %s", buf);
}

static int
bits_to_str(uint32_t bits, const char *strings[], int max,
	    char *buf, int buf_len)
{
	int i, n = 0, len = 0;

	for (i = 0; i < max; i++) {
		if (bits & (1 << i)) {
			n = snprintf(buf, buf_len, "%s,", strings[i]);
			if (n > 0 && n < buf_len) {
				buf += n;
				buf_len -= n;
				len += n;
			}
		}
	}
	/* Remove trailing comma */
	if (len) {
		*(buf - 1) = '\0';
		len--;
	}
	return len;
}

1483 /* Debug function to dump internal NIC filter structure. */
1485 __enic_fm_dump_tcam_match(const struct fm_header_set *fk_hdrset, char *buf,
1488 /* Manually keep in sync with FKM_BITS */
1489 const char *fm_fkm_str[FKM_BIT_COUNT] = {
1490 [FKM_QTAG_BIT] = "qtag",
1491 [FKM_CMD_BIT] = "cmd",
1492 [FKM_IPV4_BIT] = "ip4",
1493 [FKM_IPV6_BIT] = "ip6",
1494 [FKM_ROCE_BIT] = "roce",
1495 [FKM_UDP_BIT] = "udp",
1496 [FKM_TCP_BIT] = "tcp",
1497 [FKM_TCPORUDP_BIT] = "tcpportudp",
1498 [FKM_IPFRAG_BIT] = "ipfrag",
1499 [FKM_NVGRE_BIT] = "nvgre",
1500 [FKM_VXLAN_BIT] = "vxlan",
1501 [FKM_GENEVE_BIT] = "geneve",
1502 [FKM_NSH_BIT] = "nsh",
1503 [FKM_ROCEV2_BIT] = "rocev2",
1504 [FKM_VLAN_PRES_BIT] = "vlan_pres",
1505 [FKM_IPOK_BIT] = "ipok",
1506 [FKM_L4OK_BIT] = "l4ok",
1507 [FKM_ROCEOK_BIT] = "roceok",
1508 [FKM_FCSOK_BIT] = "fcsok",
1509 [FKM_EG_SPAN_BIT] = "eg_span",
1510 [FKM_IG_SPAN_BIT] = "ig_span",
1511 [FKM_EG_HAIRPINNED_BIT] = "eg_hairpinned",
1513 /* Manually keep in sync with FKH_BITS */
1514 const char *fm_fkh_str[FKH_BIT_COUNT] = {
1515 [FKH_ETHER_BIT] = "eth",
1516 [FKH_QTAG_BIT] = "qtag",
1517 [FKH_L2RAW_BIT] = "l2raw",
1518 [FKH_IPV4_BIT] = "ip4",
1519 [FKH_IPV6_BIT] = "ip6",
1520 [FKH_L3RAW_BIT] = "l3raw",
1521 [FKH_UDP_BIT] = "udp",
1522 [FKH_TCP_BIT] = "tcp",
1523 [FKH_ICMP_BIT] = "icmp",
1524 [FKH_VXLAN_BIT] = "vxlan",
1525 [FKH_L4RAW_BIT] = "l4raw",
1527 uint32_t fkh_bits = fk_hdrset->fk_header_select;
1528 uint32_t fkm_bits = fk_hdrset->fk_metadata;
1531 if (!fkm_bits && !fkh_bits)
1533 n = snprintf(buf, buf_len, "metadata(");
1534 if (n > 0 && n < buf_len) {
1538 n = bits_to_str(fkm_bits, fm_fkm_str, FKM_BIT_COUNT, buf, buf_len);
1539 if (n > 0 && n < buf_len) {
1543 n = snprintf(buf, buf_len, ") valid hdr fields(");
1544 if (n > 0 && n < buf_len) {
1548 n = bits_to_str(fkh_bits, fm_fkh_str, FKH_BIT_COUNT, buf, buf_len);
1549 if (n > 0 && n < buf_len) {
1553 snprintf(buf, buf_len, ")");
static void
enic_fm_dump_tcam_match(const struct fm_tcam_match_entry *match,
			uint8_t ingress)
{
	char buf[256];

	memset(buf, 0, sizeof(buf));
	__enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[0],
				  buf, sizeof(buf));
	ENICPMD_LOG(DEBUG, " TCAM %s Outer: %s %scounter",
		    (ingress) ? "IG" : "EG", buf,
		    (match->ftm_flags & FMEF_COUNTER) ? "" : "no ");
	memset(buf, 0, sizeof(buf));
	__enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[1],
				  buf, sizeof(buf));
	ENICPMD_LOG(DEBUG, " Inner: %s", buf);
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_fm_dump_tcam_entry(const struct fm_tcam_match_entry *fm_match,
			const struct fm_action *fm_action,
			uint8_t ingress)
{
	if (!rte_log_can_log(enic_pmd_logtype, RTE_LOG_DEBUG))
		return;
	enic_fm_dump_tcam_match(fm_match, ingress);
	enic_fm_dump_tcam_actions(fm_action);
}

static int
enic_fm_flow_parse(struct enic_flowman *fm,
		   const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	const struct rte_flow_action *action;
	static const enum rte_flow_action_type *sa;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "no pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "no action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (attrs->ingress && attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "bidirectional rules not supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "no attribute specified");
		return -rte_errno;
	}

	/* Verify Actions. */
	sa = (attrs->ingress) ? enic_fm_supported_ig_actions :
	     enic_fm_supported_eg_actions;
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_fm_match_action(action, sa))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "invalid action");
		return -rte_errno;
	}
	ret = enic_fm_copy_entry(fm, pattern, error);
	if (ret)
		return ret;
	ret = enic_fm_copy_action(fm, actions, attrs->ingress, error);
	return ret;
}

static void
enic_fm_counter_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
{
	if (!fm_flow->counter_valid)
		return;
	SLIST_INSERT_HEAD(&fm->counters, fm_flow->counter, next);
	fm_flow->counter_valid = false;
}

static int
enic_fm_more_counters(struct enic_flowman *fm)
{
	struct enic_fm_counter *new_stack;
	struct enic_fm_counter *ctrs;
	struct enic *enic;
	uint64_t args[2];
	int i, rc;

	ENICPMD_FUNC_TRACE();
	enic = fm->enic;
	new_stack = rte_realloc(fm->counter_stack, (fm->counters_alloced +
				FM_COUNTERS_EXPAND) *
				sizeof(struct enic_fm_counter), 0);
	if (new_stack == NULL) {
		ENICPMD_LOG(ERR, "cannot alloc counter memory");
		return -ENOMEM;
	}
	fm->counter_stack = new_stack;

	args[0] = FM_COUNTER_BRK;
	args[1] = fm->counters_alloced + FM_COUNTERS_EXPAND;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc != 0) {
		ENICPMD_LOG(ERR, "cannot alloc counters rc=%d", rc);
		return rc;
	}
	ctrs = (struct enic_fm_counter *)fm->counter_stack +
		fm->counters_alloced;
	for (i = 0; i < FM_COUNTERS_EXPAND; i++, ctrs++) {
		ctrs->handle = fm->counters_alloced + i;
		SLIST_INSERT_HEAD(&fm->counters, ctrs, next);
	}
	fm->counters_alloced += FM_COUNTERS_EXPAND;
	ENICPMD_LOG(DEBUG, "%u counters allocated, total: %u",
		    FM_COUNTERS_EXPAND, fm->counters_alloced);
	return 0;
}

static int
enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c)
{
	struct enic *enic;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	enic = fm->enic;
	args[0] = FM_COUNTER_QUERY;
	args[1] = c->handle;
	args[2] = 1; /* clear */
	ret = vnic_dev_flowman_cmd(enic->vdev, args, 3);
	if (ret)
		ENICPMD_LOG(ERR, "counter init: rc=%d handle=0x%x",
			    ret, c->handle);
	return ret;
}

static int
enic_fm_counter_alloc(struct enic_flowman *fm, struct rte_flow_error *error,
		      struct enic_fm_counter **ctr)
{
	struct enic_fm_counter *c;
	int ret;

	ENICPMD_FUNC_TRACE();
	*ctr = NULL;
	if (SLIST_EMPTY(&fm->counters)) {
		ret = enic_fm_more_counters(fm);
		if (ret)
			return rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "enic: out of counters");
	}
	c = SLIST_FIRST(&fm->counters);
	SLIST_REMOVE_HEAD(&fm->counters, next);
	*ctr = c;
	return 0;
}

static int
enic_fm_action_free(struct enic_flowman *fm, uint64_t handle)
{
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	args[0] = FM_ACTION_FREE;
	args[1] = handle;
	rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (rc)
		ENICPMD_LOG(ERR, "cannot free action: rc=%d handle=0x%" PRIx64,
			    rc, handle);
	return rc;
}

static int
enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle)
{
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	args[0] = FM_MATCH_ENTRY_REMOVE;
	args[1] = handle;
	rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (rc)
		ENICPMD_LOG(ERR, "cannot free match entry: rc=%d"
			    " handle=0x%" PRIx64, rc, handle);
	return rc;
}

static struct enic_fm_jump_flow *
find_jump_flow(struct enic_flowman *fm, uint32_t group)
{
	struct enic_fm_jump_flow *j;

	ENICPMD_FUNC_TRACE();
	TAILQ_FOREACH(j, &fm->jump_list, list) {
		if (j->group == group)
			return j;
	}
	return NULL;
}

static void
remove_jump_flow(struct enic_flowman *fm, struct rte_flow *flow)
{
	struct enic_fm_jump_flow *j;

	ENICPMD_FUNC_TRACE();
	TAILQ_FOREACH(j, &fm->jump_list, list) {
		if (j->flow == flow) {
			TAILQ_REMOVE(&fm->jump_list, j, list);
			free(j);
			return;
		}
	}
}

static int
save_jump_flow(struct enic_flowman *fm,
	       struct rte_flow *flow,
	       uint32_t group,
	       struct fm_tcam_match_entry *match,
	       struct fm_action *action)
{
	struct enic_fm_jump_flow *j;

	ENICPMD_FUNC_TRACE();
	j = calloc(1, sizeof(struct enic_fm_jump_flow));
	if (j == NULL)
		return -ENOMEM;
	j->flow = flow;
	j->group = group;
	j->match = *match;
	j->action = *action;
	TAILQ_INSERT_HEAD(&fm->jump_list, j, list);
	ENICPMD_LOG(DEBUG, "saved jump flow: flow=%p group=%u", flow, group);
	return 0;
}

static void
__enic_fm_flow_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
{
	if (fm_flow->entry_handle != FM_INVALID_HANDLE) {
		enic_fm_entry_free(fm, fm_flow->entry_handle);
		fm_flow->entry_handle = FM_INVALID_HANDLE;
	}
	if (fm_flow->action_handle != FM_INVALID_HANDLE) {
		enic_fm_action_free(fm, fm_flow->action_handle);
		fm_flow->action_handle = FM_INVALID_HANDLE;
	}
	enic_fm_counter_free(fm, fm_flow);
	if (fm_flow->fet) {
		enic_fet_put(fm, fm_flow->fet);
		fm_flow->fet = NULL;
	}
}

static void
enic_fm_flow_free(struct enic_flowman *fm, struct rte_flow *flow)
{
	if (flow->fm->fet && flow->fm->fet->default_key)
		remove_jump_flow(fm, flow);
	__enic_fm_flow_free(fm, flow->fm);
	free(flow->fm);
	free(flow);
}

static int
enic_fm_add_tcam_entry(struct enic_flowman *fm,
		       struct fm_tcam_match_entry *match_in,
		       uint64_t *entry_handle,
		       uint8_t ingress,
		       struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *ftm;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Copy entry to the command buffer */
	ftm = &fm->cmd.va->fm_tcam_match_entry;
	memcpy(ftm, match_in, sizeof(*ftm));
	/* Add TCAM entry */
	args[0] = FM_TCAM_ENTRY_INSTALL;
	args[1] = ingress ? fm->ig_tcam_hndl : fm->eg_tcam_hndl;
	args[2] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
	if (ret) {
		ENICPMD_LOG(ERR, "cannot add %s TCAM entry: rc=%d",
			    ingress ? "ingress" : "egress", ret);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "enic: devcmd(tcam-entry-install)");
		return ret;
	}
	ENICPMD_LOG(DEBUG, "installed %s TCAM entry: handle=0x%" PRIx64,
		    ingress ? "ingress" : "egress", (uint64_t)args[0]);
	*entry_handle = args[0];
	return 0;
}

static int
enic_fm_add_exact_entry(struct enic_flowman *fm,
			struct fm_tcam_match_entry *match_in,
			uint64_t *entry_handle,
			struct enic_fm_fet *fet,
			struct rte_flow_error *error)
{
	struct fm_exact_match_entry *fem;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* The new entry must have the table's key */
	if (memcmp(fet->key.fk_hdrset, match_in->ftm_mask.fk_hdrset,
		   sizeof(struct fm_header_set) * FM_HDRSET_MAX)) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"enic: key does not match group's key");
	}

	/* Copy entry to the command buffer */
	fem = &fm->cmd.va->fm_exact_match_entry;
	/*
	 * Translate the TCAM entry to an exact entry. We only need to drop
	 * the position and mask: the mask is part of the exact match table,
	 * and position (aka priority) is not supported in the exact match
	 * table.
	 */
	fem->fem_data = match_in->ftm_data;
	fem->fem_flags = match_in->ftm_flags;
	fem->fem_action = match_in->ftm_action;
	fem->fem_counter = match_in->ftm_counter;

	/* Add exact entry */
	args[0] = FM_EXACT_ENTRY_INSTALL;
	args[1] = fet->handle;
	args[2] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
	if (ret) {
		ENICPMD_LOG(ERR, "cannot add %s exact entry: group=%u",
			    fet->ingress ? "ingress" : "egress", fet->group);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "enic: devcmd(exact-entry-install)");
		return ret;
	}
	ENICPMD_LOG(DEBUG, "installed %s exact entry: group=%u"
		    " handle=0x%" PRIx64,
		    fet->ingress ? "ingress" : "egress", fet->group,
		    (uint64_t)args[0]);
	*entry_handle = args[0];
	return 0;
}

/* Push match-action to the NIC. */
static int
__enic_fm_flow_add_entry(struct enic_flowman *fm,
			 struct enic_fm_flow *fm_flow,
			 struct fm_tcam_match_entry *match_in,
			 struct fm_action *action_in,
			 uint32_t group,
			 uint8_t ingress,
			 struct rte_flow_error *error)
{
	struct enic_fm_counter *ctr;
	struct fm_action *fma;
	uint64_t action_h;
	uint64_t entry_h;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Allocate action. */
	fma = &fm->cmd.va->fm_action;
	memcpy(fma, action_in, sizeof(*fma));
	args[0] = FM_ACTION_ALLOC;
	args[1] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (ret) {
		ENICPMD_LOG(ERR, "allocating TCAM table action rc=%d", ret);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "enic: devcmd(action-alloc)");
		return ret;
	}
	action_h = args[0];
	fm_flow->action_handle = action_h;
	match_in->ftm_action = action_h;
	ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64, action_h);

	/* Allocate counter if requested. */
	if (match_in->ftm_flags & FMEF_COUNTER) {
		ret = enic_fm_counter_alloc(fm, error, &ctr);
		if (ret) /* error has been filled in */
			return ret;
		fm_flow->counter_valid = true;
		fm_flow->counter = ctr;
		match_in->ftm_counter = ctr->handle;
	}

	/*
	 * Get the group's table (either TCAM or exact match table) and
	 * add entry to it. If we use the exact match table, the handler
	 * will translate the TCAM entry (match_in) to the appropriate
	 * exact match entry and use that instead.
	 */
	entry_h = FM_INVALID_HANDLE;
	if (group == FM_TCAM_RTE_GROUP) {
		ret = enic_fm_add_tcam_entry(fm, match_in, &entry_h, ingress,
					     error);
		if (ret)
			return ret;
		/* Jump action might have a ref to fet */
		fm_flow->fet = fm->fet;
		fm->fet = NULL;
	} else {
		struct enic_fm_fet *fet = NULL;

		ret = enic_fet_get(fm, group, ingress,
				   &match_in->ftm_mask, &fet, error);
		if (ret)
			return ret;
		fm_flow->fet = fet;
		ret = enic_fm_add_exact_entry(fm, match_in, &entry_h, fet,
					      error);
		if (ret)
			return ret;
	}
	/* Clear counter after adding entry, as it requires an in-use counter */
	if (fm_flow->counter_valid) {
		ret = enic_fm_counter_zero(fm, fm_flow->counter);
		if (ret)
			return ret;
	}
	fm_flow->entry_handle = entry_h;
	return 0;
}

/* Push match-action to the NIC. */
static struct rte_flow *
enic_fm_flow_add_entry(struct enic_flowman *fm,
		       struct fm_tcam_match_entry *match_in,
		       struct fm_action *action_in,
		       const struct rte_flow_attr *attrs,
		       struct rte_flow_error *error)
{
	struct enic_fm_flow *fm_flow;
	struct rte_flow *flow;

	ENICPMD_FUNC_TRACE();
	enic_fm_dump_tcam_entry(match_in, action_in, attrs->ingress);
	flow = calloc(1, sizeof(*flow));
	fm_flow = calloc(1, sizeof(*fm_flow));
	if (flow == NULL || fm_flow == NULL) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "enic: cannot allocate rte_flow");
		free(flow);
		free(fm_flow);
		return NULL;
	}
	flow->fm = fm_flow;
	fm_flow->action_handle = FM_INVALID_HANDLE;
	fm_flow->entry_handle = FM_INVALID_HANDLE;
	if (__enic_fm_flow_add_entry(fm, fm_flow, match_in, action_in,
				     attrs->group, attrs->ingress, error)) {
		enic_fm_flow_free(fm, flow);
		flow = NULL;
	}
	return flow;
}

static void
convert_jump_flows(struct enic_flowman *fm, struct enic_fm_fet *fet,
		   struct rte_flow_error *error)
{
	struct enic_fm_flow *fm_flow;
	struct enic_fm_jump_flow *j;
	struct fm_action *fma;
	uint32_t group;

	ENICPMD_FUNC_TRACE();
	/*
	 * Find the saved flows that should jump to the new table (fet).
	 * Then delete the old TCAM entry that jumps to the default table,
	 * and add a new one that jumps to the new table.
	 */
	group = fet->group;
	j = find_jump_flow(fm, group);
	while (j) {
		ENICPMD_LOG(DEBUG, "convert jump flow: flow=%p group=%u",
			    j->flow, group);
		/* Delete old entry */
		fm_flow = j->flow->fm;
		__enic_fm_flow_free(fm, fm_flow);

		/* Add new entry */
		fma = &j->action;
		fma->fma_action_ops[0].exact.handle = fet->handle;
		if (__enic_fm_flow_add_entry(fm, fm_flow, &j->match, fma,
			FM_TCAM_RTE_GROUP, fet->ingress, error)) {
			/* Cannot roll back changes at the moment */
			ENICPMD_LOG(ERR, "cannot convert jump flow: flow=%p",
				    j->flow);
		} else {
			fm_flow->fet = fet;
			fet->ref++;
			ENICPMD_LOG(DEBUG, "convert ok: group=%u ref=%u",
				    fet->group, fet->ref);
		}

		TAILQ_REMOVE(&fm->jump_list, j, list);
		free(j);
		j = find_jump_flow(fm, group);
	}
}

static void
enic_fm_open_scratch(struct enic_flowman *fm)
{
	fm->action_op_count = 0;
	fm->fet = NULL;
	memset(&fm->tcam_entry, 0, sizeof(fm->tcam_entry));
	memset(&fm->action, 0, sizeof(fm->action));
}

static void
enic_fm_close_scratch(struct enic_flowman *fm)
{
	if (fm->fet) {
		enic_fet_put(fm, fm->fet);
		fm->fet = NULL;
	}
	fm->action_op_count = 0;
}

static int
enic_fm_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *fm_tcam_entry;
	struct fm_action *fm_action;
	struct enic_flowman *fm;
	int ret;

	ENICPMD_FUNC_TRACE();
	fm = pmd_priv(dev)->fm;
	if (fm == NULL)
		return -ENOTSUP;
	enic_fm_open_scratch(fm);
	ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error);
	if (ret == 0) {
		fm_tcam_entry = &fm->tcam_entry;
		fm_action = &fm->action;
		enic_fm_dump_tcam_entry(fm_tcam_entry, fm_action,
					attrs->ingress);
	}
	enic_fm_close_scratch(fm);
	return ret;
}

static int
enic_fm_flow_query_count(struct rte_eth_dev *dev,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
	struct rte_flow_query_count *query;
	struct enic_fm_flow *fm_flow;
	struct enic *enic;
	uint64_t args[3];
	int rc;

	ENICPMD_FUNC_TRACE();
	enic = pmd_priv(dev);
	query = data;
	fm_flow = flow->fm;
	if (!fm_flow->counter_valid)
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"enic: flow does not have counter");

	args[0] = FM_COUNTER_QUERY;
	args[1] = fm_flow->counter->handle;
	args[2] = query->reset;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 3);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot query counter: rc=%d handle=0x%x",
			    rc, fm_flow->counter->handle);
		return rc;
	}
	query->hits_set = 1;
	query->hits = args[0];
	query->bytes_set = 1;
	query->bytes = args[1];
	return 0;
}

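/*
 * Illustrative sketch only: a hypothetical caller-side use of the query
 * above through the public rte_flow API.
 */
#ifdef ENIC_FM_DOC_EXAMPLES
static int
example_query_flow_count(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	static const struct rte_flow_action count_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_count query = { .reset = 1 };
	struct rte_flow_error err;
	int rc;

	rc = rte_flow_query(dev->data->port_id, flow, count_actions,
			    &query, &err);
	if (rc == 0 && query.hits_set)
		ENICPMD_LOG(DEBUG, "hits=%" PRIu64 " bytes=%" PRIu64,
			    query.hits, query.bytes);
	return rc;
}
#endif
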
static int
enic_fm_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_fm_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

static struct rte_flow *
enic_fm_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *fm_tcam_entry;
	struct fm_action *fm_action;
	struct enic_flowman *fm;
	struct enic_fm_fet *fet;
	struct rte_flow *flow;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	enic = pmd_priv(dev);
	fm = enic->fm;
	if (fm == NULL) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"flowman is not initialized");
		return NULL;
	}
	enic_fm_open_scratch(fm);
	flow = NULL;
	ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error);
	if (ret < 0)
		goto error_with_scratch;
	fm_tcam_entry = &fm->tcam_entry;
	fm_action = &fm->action;
	flow = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
				      attrs, error);
	if (flow) {
		LIST_INSERT_HEAD(&enic->flows, flow, next);
		fet = flow->fm->fet;
		if (fet && fet->default_key) {
			/*
			 * Jump to non-existent group? Save the relevant info
			 * so we can convert this flow when that group
			 * materializes.
			 */
			save_jump_flow(fm, flow, fet->group,
				       fm_tcam_entry, fm_action);
		} else if (fet && fet->ref == 1) {
			/*
			 * A new table is created. Convert the saved flows
			 * that should jump to this group.
			 */
			convert_jump_flows(fm, fet, error);
		}
	}

error_with_scratch:
	enic_fm_close_scratch(fm);
	return flow;
}

static int
enic_fm_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     __rte_unused struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (enic->fm == NULL)
		return 0;
	LIST_REMOVE(flow, next);
	enic_fm_flow_free(enic->fm, flow);
	return 0;
}

static int
enic_fm_flow_flush(struct rte_eth_dev *dev,
		   __rte_unused struct rte_flow_error *error)
{
	struct enic_fm_flow *fm_flow;
	struct enic_flowman *fm;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (enic->fm == NULL)
		return 0;
	fm = enic->fm;
	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		fm_flow = flow->fm;
		LIST_REMOVE(flow, next);
		/*
		 * If tables are null, then the vNIC is closing, and the
		 * firmware has already cleaned up flowman state. So do not
		 * try to free resources, as it only causes errors.
		 */
		if (fm->ig_tcam_hndl == FM_INVALID_HANDLE) {
			fm_flow->entry_handle = FM_INVALID_HANDLE;
			fm_flow->action_handle = FM_INVALID_HANDLE;
			fm_flow->fet = NULL;
		}
		enic_fm_flow_free(fm, flow);
	}
	return 0;
}

static int
enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle)
{
	uint64_t args[2];
	int rc;

	args[0] = FM_MATCH_TABLE_FREE;
	args[1] = handle;
	rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (rc)
		ENICPMD_LOG(ERR, "cannot free table: rc=%d handle=0x%" PRIx64,
			    rc, handle);
	return rc;
}

static int
enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction,
		       uint32_t max_entries, uint64_t *handle)
{
	struct fm_tcam_match_table *tcam_tbl;
	struct enic *enic;
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	enic = fm->enic;
	tcam_tbl = &fm->cmd.va->fm_tcam_match_table;
	tcam_tbl->ftt_direction = direction;
	tcam_tbl->ftt_stage = FM_STAGE_LAST;
	tcam_tbl->ftt_max_entries = max_entries;
	args[0] = FM_TCAM_TABLE_ALLOC;
	args[1] = fm->cmd.pa;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc %s TCAM table: rc=%d",
			    (direction == FM_INGRESS) ? "IG" : "EG", rc);
		return rc;
	}
	*handle = args[0];
	ENICPMD_LOG(DEBUG, "%s TCAM table allocated, handle=0x%" PRIx64,
		    (direction == FM_INGRESS) ? "IG" : "EG", *handle);
	return 0;
}

static int
enic_fm_init_counters(struct enic_flowman *fm)
{
	ENICPMD_FUNC_TRACE();
	SLIST_INIT(&fm->counters);
	return enic_fm_more_counters(fm);
}

static void
enic_fm_free_all_counters(struct enic_flowman *fm)
{
	struct enic *enic;
	uint64_t args[2];
	int rc;

	enic = fm->enic;
	args[0] = FM_COUNTER_BRK;
	args[1] = 0;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc != 0)
		ENICPMD_LOG(ERR, "cannot free counters: rc=%d", rc);
	rte_free(fm->counter_stack);
}

static int
enic_fm_alloc_tcam_tables(struct enic_flowman *fm)
{
	int rc;

	ENICPMD_FUNC_TRACE();
	rc = enic_fm_tcam_tbl_alloc(fm, FM_INGRESS, FM_MAX_TCAM_TABLE_SIZE,
				    &fm->ig_tcam_hndl);
	if (rc)
		return rc;
	rc = enic_fm_tcam_tbl_alloc(fm, FM_EGRESS, FM_MAX_TCAM_TABLE_SIZE,
				    &fm->eg_tcam_hndl);
	return rc;
}

static void
enic_fm_free_tcam_tables(struct enic_flowman *fm)
{
	ENICPMD_FUNC_TRACE();
	if (fm->ig_tcam_hndl) {
		ENICPMD_LOG(DEBUG, "free IG TCAM table handle=0x%" PRIx64,
			    fm->ig_tcam_hndl);
		enic_fm_tbl_free(fm, fm->ig_tcam_hndl);
		fm->ig_tcam_hndl = FM_INVALID_HANDLE;
	}
	if (fm->eg_tcam_hndl) {
		ENICPMD_LOG(DEBUG, "free EG TCAM table handle=0x%" PRIx64,
			    fm->eg_tcam_hndl);
		enic_fm_tbl_free(fm, fm->eg_tcam_hndl);
		fm->eg_tcam_hndl = FM_INVALID_HANDLE;
	}
}

int
enic_fm_init(struct enic *enic)
{
	struct enic_flowman *fm;
	uint8_t name[RTE_MEMZONE_NAMESIZE];
	int rc;

	if (enic->flow_filter_mode != FILTER_FLOWMAN)
		return 0;
	ENICPMD_FUNC_TRACE();
	fm = calloc(1, sizeof(*fm));
	if (fm == NULL) {
		ENICPMD_LOG(ERR, "cannot alloc flowman struct");
		return -ENOMEM;
	}
	fm->enic = enic;
	TAILQ_INIT(&fm->fet_list);
	TAILQ_INIT(&fm->jump_list);
	/* Allocate host memory for flowman commands */
	snprintf((char *)name, sizeof(name), "fm-cmd-%s", enic->bdf_name);
	fm->cmd.va = enic_alloc_consistent(enic,
		sizeof(union enic_flowman_cmd_mem), &fm->cmd.pa, name);
	if (fm->cmd.va == NULL) {
		ENICPMD_LOG(ERR, "cannot allocate flowman command memory");
		rc = -ENOMEM;
		goto error_fm;
	}
	/* Allocate TCAM tables upfront as they are the main tables */
	rc = enic_fm_alloc_tcam_tables(fm);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc TCAM tables");
		goto error_cmd;
	}
	/* Then a number of counters */
	rc = enic_fm_init_counters(fm);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc counters");
		goto error_tables;
	}
	/*
	 * One default exact match table for each direction. We hold onto
	 * it until close.
	 */
	rc = enic_fet_alloc(fm, 1, NULL, 128, &fm->default_ig_fet);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc default IG exact match table");
		goto error_counters;
	}
	fm->default_ig_fet->ref = 1;
	rc = enic_fet_alloc(fm, 0, NULL, 128, &fm->default_eg_fet);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc default EG exact match table");
		goto error_ig_fet;
	}
	fm->default_eg_fet->ref = 1;
	enic->fm = fm;
	return 0;

error_ig_fet:
	enic_fet_free(fm, fm->default_ig_fet);
error_counters:
	enic_fm_free_all_counters(fm);
error_tables:
	enic_fm_free_tcam_tables(fm);
error_cmd:
	enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
		fm->cmd.va, fm->cmd.pa);
error_fm:
	free(fm);
	return rc;
}

void
enic_fm_destroy(struct enic *enic)
{
	struct enic_flowman *fm;
	struct enic_fm_fet *fet;

	if (enic->fm == NULL)
		return;
	ENICPMD_FUNC_TRACE();
	fm = enic->fm;
	enic_fet_free(fm, fm->default_eg_fet);
	enic_fet_free(fm, fm->default_ig_fet);
	/* Free all exact match tables still open */
	while (!TAILQ_EMPTY(&fm->fet_list)) {
		fet = TAILQ_FIRST(&fm->fet_list);
		enic_fet_free(fm, fet);
	}
	enic_fm_free_tcam_tables(fm);
	enic_fm_free_all_counters(fm);
	enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
		fm->cmd.va, fm->cmd.pa);
	free(fm);
	enic->fm = NULL;
}

const struct rte_flow_ops enic_fm_flow_ops = {
	.validate = enic_fm_flow_validate,
	.create = enic_fm_flow_create,
	.destroy = enic_fm_flow_destroy,
	.flush = enic_fm_flow_flush,
	.query = enic_fm_flow_query,
};