/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc. All rights reserved.
 */
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_memzone.h>

#include "enic_compat.h"

#define IP_DEFTTL 64 /* from RFC 1340. */
#define IP6_VTC_FLOW 0x60000000
/* Highest item type supported by Flowman */
#define FM_MAX_ITEM_TYPE RTE_FLOW_ITEM_TYPE_VXLAN

/* Up to 1024 TCAM entries */
#define FM_MAX_TCAM_TABLE_SIZE 1024

/* Up to 4096 entries per exact match table */
#define FM_MAX_EXACT_TABLE_SIZE 4096

/* Number of counters to allocate with each expansion */
#define FM_COUNTERS_EXPAND 100

#define FM_INVALID_HANDLE 0
/*
 * Flow exact match tables (FET) in the VIC and rte_flow groups.
 * Use a simple scheme to map groups to tables.
 * Group 0 uses the single TCAM tables, one for each direction.
 * Groups 1, 2, ... each use their own exact match table.
 *
 * The TCAM tables are allocated upfront during init.
 *
 * Exact match tables are allocated on demand. There are 3 paths that
 * lead to allocations.
 *
 * 1. Add a flow that jumps from group 0 to group N.
 *
 * If N does not exist, we allocate an exact match table for it, using
 * a dummy key. A key is required for the table.
 *
 * 2. Add a flow that uses group N.
 *
 * If N does not exist, we allocate an exact match table for it, using
 * the flow's key. Subsequent flows to the same group all should have
 * the same key.
 *
 * Without a jump flow to N, N is not reachable in hardware. No packets
 * reach N and match.
 *
 * 3. Add a flow to an empty group N.
 *
 * N has been created via (1) and the dummy key. We free that table and
 * allocate a new one using the new flow's key. We also re-do the
 * existing jump flow to point to the new table.
 */
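/*
 * Illustrative sketch (not driver code): the hypothetical testpmd-style
 * rules below would exercise paths (1) and (3) above.
 *
 *   flow create 0 ingress group 0 pattern eth / end
 *       actions jump group 1 / end
 *   -- group 1 does not exist yet, so its exact match table is
 *      allocated with a dummy key (path 1)
 *
 *   flow create 0 ingress group 1 pattern eth / ipv4 / udp / end
 *       actions queue index 1 / end
 *   -- group 1 is empty, so the dummy-key table is freed, a table
 *      keyed on eth/ipv4/udp is allocated, and the saved jump flow is
 *      redone to point at the new table (path 3)
 */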
#define FM_TCAM_RTE_GROUP 0

struct enic_fm_fet {
	TAILQ_ENTRY(enic_fm_fet) list;
	uint32_t group; /* rte_flow group ID */
	uint64_t handle; /* Exact match table handle from flowman */
	uint8_t ingress;
	uint8_t default_key;
	int ref; /* Reference count via get/put */
	struct fm_key_template key; /* Key associated with the table */
};
struct enic_fm_counter {
	SLIST_ENTRY(enic_fm_counter) next;
	uint32_t handle;
};

struct enic_fm_flow {
	bool counter_valid;
	uint64_t entry_handle;
	uint64_t action_handle;
	struct enic_fm_counter *counter;
	struct enic_fm_fet *fet;
};
struct enic_fm_jump_flow {
	TAILQ_ENTRY(enic_fm_jump_flow) list;
	struct rte_flow *flow;
	uint32_t group;
	struct fm_tcam_match_entry match;
	struct fm_action action;
};
/*
 * Flowman uses host memory for commands. This structure is allocated
 * in DMA-able memory.
 */
union enic_flowman_cmd_mem {
	struct fm_tcam_match_table fm_tcam_match_table;
	struct fm_exact_match_table fm_exact_match_table;
	struct fm_tcam_match_entry fm_tcam_match_entry;
	struct fm_exact_match_entry fm_exact_match_entry;
	struct fm_action fm_action;
};
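/*
 * A minimal sketch of how this memory is used with devcmds throughout
 * this file (the names below are the surrounding code's, not a new API):
 *
 *   struct fm_action *fma = &fm->cmd.va->fm_action;
 *   uint64_t args[2];
 *
 *   memcpy(fma, action_in, sizeof(*fma));   // request into DMA memory
 *   args[0] = FM_ACTION_ALLOC;              // devcmd opcode
 *   args[1] = fm->cmd.pa;                   // DMA address of the union
 *   ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
 *   // on success, firmware returns the new handle in args[0]
 */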
struct enic_flowman {
	struct enic *enic;
	/* Command buffer */
	struct {
		union enic_flowman_cmd_mem *va;
		dma_addr_t pa;
	} cmd;
	/* TCAM tables allocated upfront, used for group 0 */
	uint64_t ig_tcam_hndl;
	uint64_t eg_tcam_hndl;
	/* Counters */
	SLIST_HEAD(enic_free_counters, enic_fm_counter) counters;
	void *counter_stack;
	uint32_t counters_alloced;
	/* Exact match tables for groups != 0, dynamically allocated */
	TAILQ_HEAD(fet_list, enic_fm_fet) fet_list;
	/*
	 * Default exact match tables used for jump actions to
	 * non-existent groups.
	 */
	struct enic_fm_fet *default_eg_fet;
	struct enic_fm_fet *default_ig_fet;
	/* Flows that jump to the default table above */
	TAILQ_HEAD(jump_flow_list, enic_fm_jump_flow) jump_list;
	/*
	 * Scratch data used during each invocation of flow_create
	 * and flow_validate.
	 */
	struct enic_fm_fet *fet;
	struct fm_tcam_match_entry tcam_entry;
	struct fm_action action;
	struct fm_action action_tmp; /* enic_fm_reorder_action_op */
	int action_op_count;
};
static int enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle);

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * fm_tcam_entry: Flowman TCAM match entry.
 * header_level: 0 for outer header, 1 for inner header.
 */
struct copy_item_args {
	const struct rte_flow_item *item;
	struct fm_tcam_match_entry *fm_tcam_entry;
	uint8_t header_level;
};

/* Functions for copying items into a flowman match */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
/* Info about how to copy items into a flowman match */
struct enic_fm_items {
	/* Function for copying and validating an item. */
	enic_copy_item_fn * const copy_item;
	/* List of valid previous items. */
	const enum rte_flow_item_type * const prev_items;
	/*
	 * True if it's OK for this item to be the first item. For some NIC
	 * versions, it's invalid to start the stack above layer 3.
	 */
	const uint8_t valid_start_item;
};
static enic_copy_item_fn enic_fm_copy_item_eth;
static enic_copy_item_fn enic_fm_copy_item_ipv4;
static enic_copy_item_fn enic_fm_copy_item_ipv6;
static enic_copy_item_fn enic_fm_copy_item_raw;
static enic_copy_item_fn enic_fm_copy_item_sctp;
static enic_copy_item_fn enic_fm_copy_item_tcp;
static enic_copy_item_fn enic_fm_copy_item_udp;
static enic_copy_item_fn enic_fm_copy_item_vlan;
static enic_copy_item_fn enic_fm_copy_item_vxlan;
/* Ingress actions */
static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = {
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_JUMP,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_RSS,
	RTE_FLOW_ACTION_TYPE_VOID,
	RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
	RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
	RTE_FLOW_ACTION_TYPE_END, /* END must be the last entry */
};

/* Egress actions */
static const enum rte_flow_action_type enic_fm_supported_eg_actions[] = {
	RTE_FLOW_ACTION_TYPE_COUNT,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_JUMP,
	RTE_FLOW_ACTION_TYPE_PASSTHRU,
	RTE_FLOW_ACTION_TYPE_VOID,
	RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
	RTE_FLOW_ACTION_TYPE_END,
};
static const struct enic_fm_items enic_fm_items[] = {
	[RTE_FLOW_ITEM_TYPE_RAW] = {
		.copy_item = enic_fm_copy_item_raw,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.copy_item = enic_fm_copy_item_eth,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.copy_item = enic_fm_copy_item_vlan,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.copy_item = enic_fm_copy_item_ipv4,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.copy_item = enic_fm_copy_item_ipv6,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_ETH,
			RTE_FLOW_ITEM_TYPE_VLAN,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.copy_item = enic_fm_copy_item_udp,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.copy_item = enic_fm_copy_item_tcp,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
		.copy_item = enic_fm_copy_item_sctp,
		.valid_start_item = 0,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_IPV4,
			RTE_FLOW_ITEM_TYPE_IPV6,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.copy_item = enic_fm_copy_item_vxlan,
		.valid_start_item = 1,
		.prev_items = (const enum rte_flow_item_type[]) {
			RTE_FLOW_ITEM_TYPE_UDP,
			RTE_FLOW_ITEM_TYPE_END,
		},
	},
};
static int
enic_fm_copy_item_eth(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	/* Match all if no spec */
	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_header_select |= FKH_ETHER;
	fm_mask->fk_header_select |= FKH_ETHER;
	memcpy(&fm_data->l2.eth, spec, sizeof(*spec));
	memcpy(&fm_mask->l2.eth, mask, sizeof(*mask));
	return 0;
}
static int
enic_fm_copy_item_vlan(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;
	struct rte_ether_hdr *eth_mask;
	struct rte_ether_hdr *eth_val;
	uint32_t meta;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	/* Outer and inner packet vlans need different flags */
	meta = FKM_VLAN_PRES;
	if (lvl > 0)
		meta = FKM_QTAG;
	fm_data->fk_metadata |= meta;
	fm_mask->fk_metadata |= meta;

	/* Match all if no spec */
	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_vlan_mask;

	eth_mask = (void *)&fm_mask->l2.eth;
	eth_val = (void *)&fm_data->l2.eth;

	/* Outer TPID cannot be matched */
	if (eth_mask->ether_type)
		return -ENOTSUP;

	/*
	 * When packet matching, the VIC always compares vlan-stripped
	 * L2, regardless of vlan stripping settings. So, the inner type
	 * from vlan becomes the ether type of the eth header.
	 */
	eth_mask->ether_type = mask->inner_type;
	eth_val->ether_type = spec->inner_type;
	fm_data->fk_header_select |= FKH_ETHER | FKH_QTAG;
	fm_mask->fk_header_select |= FKH_ETHER | FKH_QTAG;
	fm_data->fk_vlan = rte_be_to_cpu_16(spec->tci);
	fm_mask->fk_vlan = rte_be_to_cpu_16(mask->tci);
	return 0;
}
static int
enic_fm_copy_item_ipv4(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_IPV4;
	fm_mask->fk_metadata |= FKM_IPV4;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;

	fm_data->fk_header_select |= FKH_IPV4;
	fm_mask->fk_header_select |= FKH_IPV4;
	memcpy(&fm_data->l3.ip4, spec, sizeof(*spec));
	memcpy(&fm_mask->l3.ip4, mask, sizeof(*mask));
	return 0;
}
static int
enic_fm_copy_item_ipv6(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_IPV6;
	fm_mask->fk_metadata |= FKM_IPV6;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_ipv6_mask;

	fm_data->fk_header_select |= FKH_IPV6;
	fm_mask->fk_header_select |= FKH_IPV6;
	memcpy(&fm_data->l3.ip6, spec, sizeof(*spec));
	memcpy(&fm_mask->l3.ip6, mask, sizeof(*mask));
	return 0;
}
static int
enic_fm_copy_item_udp(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_UDP;
	fm_mask->fk_metadata |= FKM_UDP;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_udp_mask;

	fm_data->fk_header_select |= FKH_UDP;
	fm_mask->fk_header_select |= FKH_UDP;
	memcpy(&fm_data->l4.udp, spec, sizeof(*spec));
	memcpy(&fm_mask->l4.udp, mask, sizeof(*mask));
	return 0;
}
static int
enic_fm_copy_item_tcp(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_metadata |= FKM_TCP;
	fm_mask->fk_metadata |= FKM_TCP;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_tcp_mask;

	fm_data->fk_header_select |= FKH_TCP;
	fm_mask->fk_header_select |= FKH_TCP;
	memcpy(&fm_data->l4.tcp, spec, sizeof(*spec));
	memcpy(&fm_mask->l4.tcp, mask, sizeof(*mask));
	return 0;
}
static int
enic_fm_copy_item_sctp(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_sctp *spec = item->spec;
	const struct rte_flow_item_sctp *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;
	uint8_t *ip_proto_mask = NULL;
	uint8_t *ip_proto = NULL;
	uint32_t l3_fkh;

	ENICPMD_FUNC_TRACE();
	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	/*
	 * The NIC filter API has no flags for "match sctp", so explicitly
	 * set the protocol number in the IP pattern.
	 */
	if (fm_data->fk_metadata & FKM_IPV4) {
		struct rte_ipv4_hdr *ip;
		ip = (struct rte_ipv4_hdr *)&fm_mask->l3.ip4;
		ip_proto_mask = &ip->next_proto_id;
		ip = (struct rte_ipv4_hdr *)&fm_data->l3.ip4;
		ip_proto = &ip->next_proto_id;
		l3_fkh = FKH_IPV4;
	} else if (fm_data->fk_metadata & FKM_IPV6) {
		struct rte_ipv6_hdr *ip;
		ip = (struct rte_ipv6_hdr *)&fm_mask->l3.ip6;
		ip_proto_mask = &ip->proto;
		ip = (struct rte_ipv6_hdr *)&fm_data->l3.ip6;
		ip_proto = &ip->proto;
		l3_fkh = FKH_IPV6;
	} else {
		/* Need IPv4/IPv6 pattern first */
		return -EINVAL;
	}
	*ip_proto = IPPROTO_SCTP;
	*ip_proto_mask = 0xff;
	fm_data->fk_header_select |= l3_fkh;
	fm_mask->fk_header_select |= l3_fkh;

	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_sctp_mask;

	fm_data->fk_header_select |= FKH_L4RAW;
	fm_mask->fk_header_select |= FKH_L4RAW;
	memcpy(fm_data->l4.rawdata, spec, sizeof(*spec));
	memcpy(fm_mask->l4.rawdata, mask, sizeof(*mask));
	return 0;
}
static int
enic_fm_copy_item_vxlan(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	/* Only 2 header levels (outer and inner) allowed */
	if (arg->header_level > 0)
		return -EINVAL;

	fm_data = &entry->ftm_data.fk_hdrset[0];
	fm_mask = &entry->ftm_mask.fk_hdrset[0];
	fm_data->fk_metadata |= FKM_VXLAN;
	fm_mask->fk_metadata |= FKM_VXLAN;
	/* items from here on out are inner header items */
	arg->header_level = 1;

	/* Match all if no spec */
	if (!spec)
		return 0;
	if (!mask)
		mask = &rte_flow_item_vxlan_mask;

	fm_data->fk_header_select |= FKH_VXLAN;
	fm_mask->fk_header_select |= FKH_VXLAN;
	memcpy(&fm_data->vxlan, spec, sizeof(*spec));
	memcpy(&fm_mask->vxlan, mask, sizeof(*mask));
	return 0;
}
/*
 * Currently, raw pattern match is very limited. It is intended for matching
 * UDP tunnel headers (e.g. vxlan or geneve).
 */
static int
enic_fm_copy_item_raw(struct copy_item_args *arg)
{
	const struct rte_flow_item *item = arg->item;
	const struct rte_flow_item_raw *spec = item->spec;
	const struct rte_flow_item_raw *mask = item->mask;
	const uint8_t lvl = arg->header_level;
	struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
	struct fm_header_set *fm_data, *fm_mask;

	ENICPMD_FUNC_TRACE();
	/* Cannot be used for inner packet */
	if (lvl > 0)
		return -EINVAL;
	/* Need both spec and mask */
	if (!spec || !mask)
		return -EINVAL;
	/* Only supports relative with offset 0 */
	if (!spec->relative || spec->offset != 0 || spec->search ||
	    spec->limit)
		return -EINVAL;
	/* Need non-null pattern that fits within the NIC's filter pattern */
	if (spec->length == 0 ||
	    spec->length + sizeof(struct rte_udp_hdr) > FM_LAYER_SIZE ||
	    !spec->pattern || !mask->pattern)
		return -EINVAL;
	/*
	 * Mask fields, including length, are often set to zero. Assume that
	 * means "same as spec" to avoid breaking existing apps. If length
	 * is not zero, then it should be >= spec length.
	 *
	 * No more pattern follows this, so append to the L4 layer instead of
	 * L5 to work with both recent and older VICs.
	 */
	if (mask->length != 0 && mask->length < spec->length)
		return -EINVAL;

	fm_data = &entry->ftm_data.fk_hdrset[lvl];
	fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
	fm_data->fk_header_select |= FKH_L4RAW;
	fm_mask->fk_header_select |= FKH_L4RAW;
	fm_data->fk_header_select &= ~FKH_UDP;
	fm_mask->fk_header_select &= ~FKH_UDP;
	memcpy(fm_data->l4.rawdata + sizeof(struct rte_udp_hdr),
	       spec->pattern, spec->length);
	memcpy(fm_mask->l4.rawdata + sizeof(struct rte_udp_hdr),
	       mask->pattern, spec->length);
	return 0;
}
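/*
 * Illustrative sketch (not driver code): within the limits above, a
 * Geneve header immediately following UDP could be matched with a RAW
 * item like the following, where geneve_hdr/geneve_msk are hypothetical
 * 8-byte buffers.
 *
 *   struct rte_flow_item_raw raw_spec = {
 *           .relative = 1,
 *           .offset = 0,
 *           .length = 8,
 *           .pattern = geneve_hdr,
 *   };
 *   struct rte_flow_item_raw raw_mask = {
 *           .relative = 1,
 *           .length = 8,
 *           .pattern = geneve_msk,
 *   };
 *   // pattern: eth / ipv4 / udp / raw / end
 */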
static int
enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress,
	       struct fm_key_template *key, int entries,
	       struct enic_fm_fet **fet_out)
{
	struct fm_exact_match_table *cmd;
	struct fm_header_set *hdr;
	struct enic_fm_fet *fet;
	uint64_t args[2];
	int ret;

	ENICPMD_FUNC_TRACE();
	fet = calloc(1, sizeof(struct enic_fm_fet));
	if (fet == NULL)
		return -ENOMEM;
	cmd = &fm->cmd.va->fm_exact_match_table;
	memset(cmd, 0, sizeof(*cmd));
	cmd->fet_direction = ingress ? FM_INGRESS : FM_EGRESS;
	cmd->fet_stage = FM_STAGE_LAST;
	cmd->fet_max_entries = entries ? entries : FM_MAX_EXACT_TABLE_SIZE;
	if (key == NULL) {
		hdr = &cmd->fet_key.fk_hdrset[0];
		memset(hdr, 0, sizeof(*hdr));
		hdr->fk_header_select = FKH_IPV4 | FKH_UDP;
		hdr->l3.ip4.fk_saddr = 0xFFFFFFFF;
		hdr->l3.ip4.fk_daddr = 0xFFFFFFFF;
		hdr->l4.udp.fk_source = 0xFFFF;
		hdr->l4.udp.fk_dest = 0xFFFF;
		fet->default_key = 1;
	} else {
		memcpy(&cmd->fet_key, key, sizeof(*key));
		memcpy(&fet->key, key, sizeof(*key));
		fet->default_key = 0;
	}
	cmd->fet_key.fk_packet_tag = 1;

	args[0] = FM_EXACT_TABLE_ALLOC;
	args[1] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (ret) {
		ENICPMD_LOG(ERR, "cannot alloc exact match table: rc=%d", ret);
		free(fet);
		return ret;
	}
	fet->handle = args[0];
	fet->ingress = ingress;
	ENICPMD_LOG(DEBUG, "allocated exact match table: handle=0x%" PRIx64,
		    fet->handle);
	*fet_out = fet;
	return 0;
}
static void
enic_fet_free(struct enic_flowman *fm, struct enic_fm_fet *fet)
{
	ENICPMD_FUNC_TRACE();
	enic_fm_tbl_free(fm, fet->handle);
	if (!fet->default_key)
		TAILQ_REMOVE(&fm->fet_list, fet, list);
	free(fet);
}
/*
 * Get the exact match table for the given combination of
 * <group, ingress, key>. Allocate one on the fly as necessary.
 */
static int
enic_fet_get(struct enic_flowman *fm,
	     uint32_t group,
	     uint8_t ingress,
	     struct fm_key_template *key,
	     struct enic_fm_fet **fet_out,
	     struct rte_flow_error *error)
{
	struct enic_fm_fet *fet;

	ENICPMD_FUNC_TRACE();
	/* See if we already have this table open */
	TAILQ_FOREACH(fet, &fm->fet_list, list) {
		if (fet->group == group && fet->ingress == ingress)
			break;
	}
	if (fet == NULL) {
		/* Jumping to a non-existent group? Use the default table */
		if (key == NULL) {
			fet = ingress ? fm->default_ig_fet : fm->default_eg_fet;
		} else if (enic_fet_alloc(fm, ingress, key, 0, &fet)) {
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "enic: cannot get exact match table");
		}
		fet->group = group;
		/* Default table is never on the open table list */
		if (!fet->default_key)
			TAILQ_INSERT_HEAD(&fm->fet_list, fet, list);
	}
	fet->ref++;
	*fet_out = fet;
	ENICPMD_LOG(DEBUG, "fet_get: %s %s group=%u ref=%u",
		    fet->default_key ? "default" : "",
		    fet->ingress ? "ingress" : "egress",
		    fet->group, fet->ref);
	return 0;
}
static void
enic_fet_put(struct enic_flowman *fm, struct enic_fm_fet *fet)
{
	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(fet->ref > 0);
	fet->ref--;
	ENICPMD_LOG(DEBUG, "fet_put: %s %s group=%u ref=%u",
		    fet->default_key ? "default" : "",
		    fet->ingress ? "ingress" : "egress",
		    fet->group, fet->ref);
	if (fet->ref == 0)
		enic_fet_free(fm, fet);
}
/* Return 1 if current item is valid on top of the previous one. */
static int
fm_item_stacking_valid(enum rte_flow_item_type prev_item,
		       const struct enic_fm_items *item_info,
		       uint8_t is_first_item)
{
	enum rte_flow_item_type const *allowed_items = item_info->prev_items;

	ENICPMD_FUNC_TRACE();
	for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
		if (prev_item == *allowed_items)
			return 1;
	}
	/* This is the first item in the stack. Check if that's allowed. */
	if (is_first_item && item_info->valid_start_item)
		return 1;
	return 0;
}
/*
 * Build the flow manager match entry structure from the provided pattern.
 * The pattern is validated as the items are copied.
 */
static int
enic_fm_copy_entry(struct enic_flowman *fm,
		   const struct rte_flow_item pattern[],
		   struct rte_flow_error *error)
{
	const struct enic_fm_items *item_info;
	enum rte_flow_item_type prev_item;
	const struct rte_flow_item *item;
	struct copy_item_args args;
	uint8_t prev_header_level;
	uint8_t is_first_item;
	int ret;

	ENICPMD_FUNC_TRACE();
	item = pattern;
	is_first_item = 1;
	prev_item = RTE_FLOW_ITEM_TYPE_END;

	args.fm_tcam_entry = &fm->tcam_entry;
	args.header_level = 0;
	prev_header_level = 0;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/*
		 * Get info about how to validate and copy the item. If NULL
		 * is returned, the NIC does not support the item.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		item_info = &enic_fm_items[item->type];

		if (item->type > FM_MAX_ITEM_TYPE ||
		    item_info->copy_item == NULL) {
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "enic: unsupported item");
		}

		/* check to see if item stacking is valid */
		if (!fm_item_stacking_valid(prev_item, item_info,
					    is_first_item))
			goto stacking_error;

		args.item = item;
		ret = item_info->copy_item(&args);
		if (ret)
			goto item_not_supported;
		/* Going from outer to inner? Treat it as a new packet start */
		if (prev_header_level != args.header_level) {
			prev_item = RTE_FLOW_ITEM_TYPE_END;
			is_first_item = 1;
		} else {
			prev_item = item->type;
			is_first_item = 0;
		}
		prev_header_level = args.header_level;
	}
	return 0;

item_not_supported:
	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
				  NULL, "enic: unsupported item type");

stacking_error:
	return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, "enic: unsupported item stack");
}
static void
flow_item_skip_void(const struct rte_flow_item **item)
{
	for ( ; ; (*item)++)
		if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
			return;
}

static void
append_template(void **template, uint8_t *off, const void *data, int len)
{
	memcpy(*template, data, len);
	*template = (char *)*template + len;
	*off = *off + len;
}
static int
enic_fm_append_action_op(struct enic_flowman *fm,
			 struct fm_action_op *fm_op,
			 struct rte_flow_error *error)
{
	int count;

	count = fm->action_op_count;
	ENICPMD_LOG(DEBUG, "append action op: idx=%d op=%u",
		    count, fm_op->fa_op);
	if (count == FM_ACTION_OP_MAX) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"too many action operations");
	}
	fm->action.fma_action_ops[count] = *fm_op;
	fm->action_op_count = count + 1;
	return 0;
}
/*
 * NIC requires that 1st steer appear before decap.
 * Correct example: steer, decap, steer, steer, ...
 */
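/*
 * E.g. an op list built as "decap, steer, end" by copy_action is
 * rewritten below to "steer, decap, end"; any later steers keep their
 * positions.
 */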
static void
enic_fm_reorder_action_op(struct enic_flowman *fm)
{
	struct fm_action_op *op, *steer, *decap;
	struct fm_action_op tmp_op;

	ENICPMD_FUNC_TRACE();
	/* Find 1st steer and decap */
	op = fm->action.fma_action_ops;
	steer = NULL;
	decap = NULL;
	while (op->fa_op != FMOP_END) {
		if (!decap && op->fa_op == FMOP_DECAP_NOSTRIP)
			decap = op;
		else if (!steer && op->fa_op == FMOP_RQ_STEER)
			steer = op;
		op++;
	}
	/* If decap is before steer, swap */
	if (steer && decap && decap < steer) {
		op = fm->action.fma_action_ops;
		ENICPMD_LOG(DEBUG, "swap decap %ld <-> steer %ld",
			    (long)(decap - op), (long)(steer - op));
		tmp_op = *decap;
		*decap = *steer;
		*steer = tmp_op;
	}
}
/* VXLAN decap is done via flowman compound action */
static int
enic_fm_copy_vxlan_decap(struct enic_flowman *fm,
			 struct fm_tcam_match_entry *fmt,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	struct fm_header_set *fm_data;
	struct fm_action_op fm_op;

	ENICPMD_FUNC_TRACE();
	fm_data = &fmt->ftm_data.fk_hdrset[0];
	if (!(fm_data->fk_metadata & FKM_VXLAN)) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"vxlan-decap: vxlan must be in pattern");
	}

	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_DECAP_NOSTRIP;
	return enic_fm_append_action_op(fm, &fm_op, error);
}
/* VXLAN encap is done via flowman compound action */
static int
enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
			 const struct rte_flow_item *item,
			 struct rte_flow_error *error)
{
	struct fm_action_op fm_op;
	struct rte_ether_hdr *eth;
	uint16_t *ethertype;
	void *template;
	uint8_t off;

	ENICPMD_FUNC_TRACE();
	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_ENCAP;
	template = fm->action.fma_data;
	off = 0;
	/*
	 * Copy flow items to the flowman template starting L2.
	 * L2 must be ethernet.
	 */
	flow_item_skip_void(&item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: first item should be ethernet");
	eth = (struct rte_ether_hdr *)template;
	ethertype = &eth->ether_type;
	append_template(&template, &off, item->spec,
			sizeof(struct rte_flow_item_eth));
	item++;
	flow_item_skip_void(&item);

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		const struct rte_flow_item_vlan *spec;

		ENICPMD_LOG(DEBUG, "vxlan-encap: vlan");
		spec = item->spec;
		fm_op.encap.outer_vlan = rte_be_to_cpu_16(spec->tci);
		item++;
		flow_item_skip_void(&item);
	}

	/* L3 must be IPv4, IPv6 */
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
	{
		struct rte_ipv4_hdr *ip4;

		ENICPMD_LOG(DEBUG, "vxlan-encap: ipv4");
		*ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		ip4 = (struct rte_ipv4_hdr *)template;
		/*
		 * Offset of IPv4 length field and its initial value
		 * (IP + UDP + VXLAN) are specified in the action. The NIC
		 * will add the inner packet length.
		 */
		fm_op.encap.len1_offset = off +
			offsetof(struct rte_ipv4_hdr, total_length);
		fm_op.encap.len1_delta = sizeof(struct rte_ipv4_hdr) +
			sizeof(struct rte_udp_hdr) +
			sizeof(struct rte_vxlan_hdr);
		append_template(&template, &off, item->spec,
				sizeof(struct rte_ipv4_hdr));
		ip4->version_ihl = RTE_IPV4_VHL_DEF;
		if (ip4->time_to_live == 0)
			ip4->time_to_live = IP_DEFTTL;
		ip4->next_proto_id = IPPROTO_UDP;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_IPV6:
	{
		struct rte_ipv6_hdr *ip6;

		ENICPMD_LOG(DEBUG, "vxlan-encap: ipv6");
		*ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		ip6 = (struct rte_ipv6_hdr *)template;
		fm_op.encap.len1_offset = off +
			offsetof(struct rte_ipv6_hdr, payload_len);
		fm_op.encap.len1_delta = sizeof(struct rte_udp_hdr) +
			sizeof(struct rte_vxlan_hdr);
		append_template(&template, &off, item->spec,
				sizeof(struct rte_ipv6_hdr));
		ip6->vtc_flow |= rte_cpu_to_be_32(IP6_VTC_FLOW);
		if (ip6->hop_limits == 0)
			ip6->hop_limits = IP_DEFTTL;
		ip6->proto = IPPROTO_UDP;
		break;
	}
	default:
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: L3 must be IPv4/IPv6");
	}
	item++;
	flow_item_skip_void(&item);

	/* L4 must be UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: UDP must follow IPv4/IPv6");
	/* UDP length = UDP + VXLAN. NIC will add the inner packet length. */
	fm_op.encap.len2_offset =
		off + offsetof(struct rte_udp_hdr, dgram_len);
	fm_op.encap.len2_delta =
		sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr);
	append_template(&template, &off, item->spec,
			sizeof(struct rte_udp_hdr));
	item++;
	flow_item_skip_void(&item);

	/* Last item must be VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN)
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			"vxlan-encap: VXLAN must follow UDP");
	append_template(&template, &off, item->spec,
			sizeof(struct rte_flow_item_vxlan));

	/*
	 * Fill in the rest of the action structure.
	 * Indicate that we want to encap with vxlan at packet start.
	 */
	fm_op.encap.template_offset = 0;
	fm_op.encap.template_len = off;
	return enic_fm_append_action_op(fm, &fm_op, error);
}
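/*
 * Illustrative sketch (not driver code): the smallest encap definition
 * the function above accepts is eth / ipv4 / udp / vxlan, e.g.
 *
 *   struct rte_flow_item defn[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vxlan_encap encap = { .definition = defn };
 *
 * The *_spec variables are hypothetical. TTL and protocol fields left
 * at 0 are filled in with defaults (IP_DEFTTL, IPPROTO_UDP) above, and
 * the length fields are fixed up by the NIC per the encap action.
 */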
static int
enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr,
		  uint64_t *handle)
{
	uint32_t bdf;
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	ENICPMD_LOG(DEBUG, "bdf=%x:%x:%x", addr->bus, addr->devid,
		    addr->function);
	bdf = addr->bus << 8 | addr->devid << 3 | addr->function;
	args[0] = FM_VNIC_FIND;
	args[1] = bdf;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc != 0) {
		ENICPMD_LOG(ERR, "cannot find vnic: rc=%d", rc);
		return rc;
	}
	*handle = args[0];
	ENICPMD_LOG(DEBUG, "found vnic: handle=0x%" PRIx64, *handle);
	return 0;
}
/* Translate flow actions to flowman TCAM entry actions */
static int
enic_fm_copy_action(struct enic_flowman *fm,
		    const struct rte_flow_action actions[],
		    uint8_t ingress,
		    struct rte_flow_error *error)
{
	enum {
		FATE = 1 << 0,
		PASSTHRU = 1 << 1,
		COUNT = 1 << 2,
		ENCAP = 1 << 3,
		DECAP = 1 << 4,
	};
	struct fm_tcam_match_entry *fmt;
	struct fm_action_op fm_op;
	struct enic *enic;
	uint32_t overlap;
	uint64_t vnic_h;
	bool first_rq;
	int ret;

	ENICPMD_FUNC_TRACE();
	fmt = &fm->tcam_entry;
	first_rq = true;
	enic = fm->enic;
	overlap = 0;
	vnic_h = 0; /* 0 = current vNIC */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU: {
			if (overlap & PASSTHRU)
				goto unsupported;
			overlap |= PASSTHRU;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_JUMP: {
			const struct rte_flow_action_jump *jump =
				actions->conf;
			struct enic_fm_fet *fet;

			if (overlap & FATE)
				goto unsupported;
			ret = enic_fet_get(fm, jump->group, ingress, NULL,
					   &fet, error);
			if (ret)
				return ret;
			overlap |= FATE;
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_EXACT_MATCH;
			fm_op.exact.handle = fet->handle;
			fm->fet = fet;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK: {
			const struct rte_flow_action_mark *mark =
				actions->conf;

			if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "invalid mark id");
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_MARK;
			fm_op.mark.mark = mark->id + 1;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_FLAG: {
			/* ENIC_MAGIC_FILTER_ID is reserved for flagging */
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_MARK;
			fm_op.mark.mark = ENIC_MAGIC_FILTER_ID;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_QUEUE: {
			const struct rte_flow_action_queue *queue =
				actions->conf;

			/*
			 * If fate other than QUEUE or RSS, fail. Multiple
			 * rss and queue actions are ok.
			 */
			if ((overlap & FATE) && first_rq)
				goto unsupported;
			first_rq = false;
			overlap |= FATE;
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_RQ_STEER;
			fm_op.rq_steer.rq_index =
				enic_rte_rq_idx_to_sop_idx(queue->index);
			fm_op.rq_steer.rq_count = 1;
			fm_op.rq_steer.vnic_handle = vnic_h;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
				    fm_op.rq_steer.rq_index);
			break;
		}
		case RTE_FLOW_ACTION_TYPE_DROP: {
			if (overlap & FATE)
				goto unsupported;
			overlap |= FATE;
			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_DROP;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			ENICPMD_LOG(DEBUG, "create DROP action");
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT: {
			if (overlap & COUNT)
				goto unsupported;
			overlap |= COUNT;
			/* Count is associated with the entry, not the action, on the VIC */
			fmt->ftm_flags |= FMEF_COUNTER;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS: {
			const struct rte_flow_action_rss *rss = actions->conf;
			bool allow;
			uint16_t i;

			/*
			 * If fate other than QUEUE or RSS, fail. Multiple
			 * rss and queue actions are ok.
			 */
			if ((overlap & FATE) && first_rq)
				goto unsupported;
			first_rq = false;
			overlap |= FATE;

			/*
			 * Hardware only supports RSS actions on outer level
			 * with default type and function. Queues must be
			 * sequential.
			 */
			allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
				rss->level == 0 && (rss->types == 0 ||
				rss->types == enic->rss_hf) &&
				rss->queue_num <= enic->rq_count &&
				rss->queue[rss->queue_num - 1] < enic->rq_count;

			/* Identity queue map needs to be sequential */
			for (i = 1; i < rss->queue_num; i++)
				allow = allow && (rss->queue[i] ==
					rss->queue[i - 1] + 1);
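			/* E.g. queues 2,3,4,5 are allowed; 2,4,5 and 5,4,3,2 are not */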
			if (!allow)
				goto unsupported;

			memset(&fm_op, 0, sizeof(fm_op));
			fm_op.fa_op = FMOP_RQ_STEER;
			fm_op.rq_steer.rq_index =
				enic_rte_rq_idx_to_sop_idx(rss->queue[0]);
			fm_op.rq_steer.rq_count = rss->queue_num;
			fm_op.rq_steer.vnic_handle = vnic_h;
			ret = enic_fm_append_action_op(fm, &fm_op, error);
			if (ret)
				return ret;
			ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
				    fm_op.rq_steer.rq_index);
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID: {
			const struct rte_flow_action_port_id *port;
			struct rte_pci_device *pdev;
			struct rte_eth_dev *dev;

			port = actions->conf;
			if (port->original) {
				vnic_h = 0; /* This port */
				break;
			}
			ENICPMD_LOG(DEBUG, "port id %u", port->id);
			if (!rte_eth_dev_is_valid_port(port->id)) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "invalid port_id");
			}
			dev = &rte_eth_devices[port->id];
			if (!dev_is_enic(dev)) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "port_id is not enic");
			}
			pdev = RTE_ETH_DEV_TO_PCI(dev);
			if (enic_fm_find_vnic(enic, &pdev->addr, &vnic_h)) {
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "port_id is not vnic");
			}
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: {
			if (overlap & DECAP)
				goto unsupported;
			overlap |= DECAP;
			ret = enic_fm_copy_vxlan_decap(fm, fmt, actions,
						       error);
			if (ret != 0)
				return ret;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: {
			const struct rte_flow_action_vxlan_encap *encap;

			encap = actions->conf;
			if (overlap & ENCAP)
				goto unsupported;
			overlap |= ENCAP;
			ret = enic_fm_copy_vxlan_encap(fm, encap->definition,
						       error);
			if (ret != 0)
				return ret;
			break;
		}
		default:
			goto unsupported;
		}
	}

	if (!(overlap & (FATE | PASSTHRU | COUNT)))
		goto unsupported;
	memset(&fm_op, 0, sizeof(fm_op));
	fm_op.fa_op = FMOP_END;
	ret = enic_fm_append_action_op(fm, &fm_op, error);
	if (ret)
		return ret;
	enic_fm_reorder_action_op(fm);
	return 0;

unsupported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  NULL, "enic: unsupported action");
}
/** Check if the action is supported */
static int
enic_fm_match_action(const struct rte_flow_action *action,
		     const enum rte_flow_action_type *supported_actions)
{
	for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
	     supported_actions++) {
		if (action->type == *supported_actions)
			return 1;
	}
	return 0;
}
/* Debug function to dump internal NIC action structure. */
static void
enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
{
	/* Manually keep in sync with FMOP commands */
	const char *fmop_str[FMOP_OP_MAX] = {
		[FMOP_DROP] = "drop",
		[FMOP_RQ_STEER] = "steer",
		[FMOP_EXACT_MATCH] = "exmatch",
		[FMOP_MARK] = "mark",
		[FMOP_EXT_MARK] = "ext_mark",
		[FMOP_EG_HAIRPIN] = "eg_hairpin",
		[FMOP_IG_HAIRPIN] = "ig_hairpin",
		[FMOP_ENCAP_IVLAN] = "encap_ivlan",
		[FMOP_ENCAP_NOIVLAN] = "encap_noivlan",
		[FMOP_ENCAP] = "encap",
		[FMOP_SET_OVLAN] = "set_ovlan",
		[FMOP_DECAP_NOSTRIP] = "decap_nostrip",
	};
	const struct fm_action_op *op = &fm_action->fma_action_ops[0];
	char buf[128], *bp = buf;
	const char *op_str;
	int i, n, buf_len;

	buf[0] = '\0';
	buf_len = sizeof(buf);
	for (i = 0; i < FM_ACTION_OP_MAX; i++) {
		if (op->fa_op == FMOP_END)
			break;
		if (op->fa_op >= FMOP_OP_MAX)
			op_str = "unknown";
		else
			op_str = fmop_str[op->fa_op];
		n = snprintf(bp, buf_len, "%s,", op_str);
		if (n > 0 && n < buf_len) {
			bp += n;
			buf_len -= n;
		}
		op++;
	}
	/* Remove trailing comma */
	if (buf[0])
		*(bp - 1) = '\0';
	ENICPMD_LOG(DEBUG, " Actions: %s", buf);
}
static int
bits_to_str(uint32_t bits, const char *strings[], int max,
	    char *buf, int buf_len)
{
	int i, n = 0, len = 0;

	for (i = 0; i < max; i++) {
		if (bits & (1 << i)) {
			n = snprintf(buf, buf_len, "%s,", strings[i]);
			if (n > 0 && n < buf_len) {
				buf += n;
				buf_len -= n;
				len += n;
			}
		}
	}
	/* Remove trailing comma */
	if (len) {
		*(buf - 1) = '\0';
		len--;
	}
	return len;
}
/* Debug function to dump internal NIC filter structure. */
static void
__enic_fm_dump_tcam_match(const struct fm_header_set *fk_hdrset, char *buf,
			  int buf_len)
{
	/* Manually keep in sync with FKM_BITS */
	const char *fm_fkm_str[FKM_BIT_COUNT] = {
		[FKM_QTAG_BIT] = "qtag",
		[FKM_CMD_BIT] = "cmd",
		[FKM_IPV4_BIT] = "ip4",
		[FKM_IPV6_BIT] = "ip6",
		[FKM_ROCE_BIT] = "roce",
		[FKM_UDP_BIT] = "udp",
		[FKM_TCP_BIT] = "tcp",
		[FKM_TCPORUDP_BIT] = "tcporudp",
		[FKM_IPFRAG_BIT] = "ipfrag",
		[FKM_NVGRE_BIT] = "nvgre",
		[FKM_VXLAN_BIT] = "vxlan",
		[FKM_GENEVE_BIT] = "geneve",
		[FKM_NSH_BIT] = "nsh",
		[FKM_ROCEV2_BIT] = "rocev2",
		[FKM_VLAN_PRES_BIT] = "vlan_pres",
		[FKM_IPOK_BIT] = "ipok",
		[FKM_L4OK_BIT] = "l4ok",
		[FKM_ROCEOK_BIT] = "roceok",
		[FKM_FCSOK_BIT] = "fcsok",
		[FKM_EG_SPAN_BIT] = "eg_span",
		[FKM_IG_SPAN_BIT] = "ig_span",
		[FKM_EG_HAIRPINNED_BIT] = "eg_hairpinned",
	};
	/* Manually keep in sync with FKH_BITS */
	const char *fm_fkh_str[FKH_BIT_COUNT] = {
		[FKH_ETHER_BIT] = "eth",
		[FKH_QTAG_BIT] = "qtag",
		[FKH_L2RAW_BIT] = "l2raw",
		[FKH_IPV4_BIT] = "ip4",
		[FKH_IPV6_BIT] = "ip6",
		[FKH_L3RAW_BIT] = "l3raw",
		[FKH_UDP_BIT] = "udp",
		[FKH_TCP_BIT] = "tcp",
		[FKH_ICMP_BIT] = "icmp",
		[FKH_VXLAN_BIT] = "vxlan",
		[FKH_L4RAW_BIT] = "l4raw",
	};
	uint32_t fkh_bits = fk_hdrset->fk_header_select;
	uint32_t fkm_bits = fk_hdrset->fk_metadata;
	int n;

	if (!fkm_bits && !fkh_bits)
		return;
	n = snprintf(buf, buf_len, "metadata(");
	if (n > 0 && n < buf_len) {
		buf += n;
		buf_len -= n;
	}
	n = bits_to_str(fkm_bits, fm_fkm_str, FKM_BIT_COUNT, buf, buf_len);
	if (n > 0 && n < buf_len) {
		buf += n;
		buf_len -= n;
	}
	n = snprintf(buf, buf_len, ") valid hdr fields(");
	if (n > 0 && n < buf_len) {
		buf += n;
		buf_len -= n;
	}
	n = bits_to_str(fkh_bits, fm_fkh_str, FKH_BIT_COUNT, buf, buf_len);
	if (n > 0 && n < buf_len) {
		buf += n;
		buf_len -= n;
	}
	snprintf(buf, buf_len, ")");
}
static void
enic_fm_dump_tcam_match(const struct fm_tcam_match_entry *match,
			uint8_t ingress)
{
	char buf[256];

	memset(buf, 0, sizeof(buf));
	__enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[0],
				  buf, sizeof(buf));
	ENICPMD_LOG(DEBUG, " TCAM %s Outer: %s %scounter",
		    (ingress) ? "IG" : "EG", buf,
		    (match->ftm_flags & FMEF_COUNTER) ? "" : "no ");
	memset(buf, 0, sizeof(buf));
	__enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[1],
				  buf, sizeof(buf));
	if (buf[0])
		ENICPMD_LOG(DEBUG, " Inner: %s", buf);
}
/* Debug function to dump internal NIC flow structures. */
static void
enic_fm_dump_tcam_entry(const struct fm_tcam_match_entry *fm_match,
			const struct fm_action *fm_action,
			uint8_t ingress)
{
	if (!rte_log_can_log(enic_pmd_logtype, RTE_LOG_DEBUG))
		return;
	enic_fm_dump_tcam_match(fm_match, ingress);
	enic_fm_dump_tcam_actions(fm_action);
}
static int
enic_fm_flow_parse(struct enic_flowman *fm,
		   const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	const struct rte_flow_action *action;
	const enum rte_flow_action_type *sa;
	int ret;

	ENICPMD_FUNC_TRACE();

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "no pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "no action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (attrs->ingress && attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "bidirectional rules not supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "no attribute specified");
		return -rte_errno;
	}

	/* Verify Actions. */
	sa = (attrs->ingress) ? enic_fm_supported_ig_actions :
	     enic_fm_supported_eg_actions;
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_fm_match_action(action, sa))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "invalid action");
		return -rte_errno;
	}
	ret = enic_fm_copy_entry(fm, pattern, error);
	if (ret)
		return ret;
	ret = enic_fm_copy_action(fm, actions, attrs->ingress, error);
	return ret;
}
static void
enic_fm_counter_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
{
	if (!fm_flow->counter_valid)
		return;
	SLIST_INSERT_HEAD(&fm->counters, fm_flow->counter, next);
	fm_flow->counter_valid = false;
}
static int
enic_fm_more_counters(struct enic_flowman *fm)
{
	struct enic_fm_counter *new_stack;
	struct enic_fm_counter *ctrs;
	struct enic *enic;
	int i, rc;
	uint64_t args[2];

	ENICPMD_FUNC_TRACE();
	enic = fm->enic;
	new_stack = rte_realloc(fm->counter_stack, (fm->counters_alloced +
				FM_COUNTERS_EXPAND) *
				sizeof(struct enic_fm_counter), 0);
	if (new_stack == NULL) {
		ENICPMD_LOG(ERR, "cannot alloc counter memory");
		return -ENOMEM;
	}
	fm->counter_stack = new_stack;

	args[0] = FM_COUNTER_BRK;
	args[1] = fm->counters_alloced + FM_COUNTERS_EXPAND;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc != 0) {
		ENICPMD_LOG(ERR, "cannot alloc counters rc=%d", rc);
		return rc;
	}
	ctrs = (struct enic_fm_counter *)fm->counter_stack +
		fm->counters_alloced;
	for (i = 0; i < FM_COUNTERS_EXPAND; i++, ctrs++) {
		ctrs->handle = fm->counters_alloced + i;
		SLIST_INSERT_HEAD(&fm->counters, ctrs, next);
	}
	fm->counters_alloced += FM_COUNTERS_EXPAND;
	ENICPMD_LOG(DEBUG, "%u counters allocated, total: %u",
		    FM_COUNTERS_EXPAND, fm->counters_alloced);
	return 0;
}
static int
enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c)
{
	struct enic *enic;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	enic = fm->enic;
	args[0] = FM_COUNTER_QUERY;
	args[1] = c->handle;
	args[2] = 1; /* clear */
	ret = vnic_dev_flowman_cmd(enic->vdev, args, 3);
	if (ret) {
		ENICPMD_LOG(ERR, "counter init: rc=%d handle=0x%x",
			    ret, c->handle);
	}
	return ret;
}
static int
enic_fm_counter_alloc(struct enic_flowman *fm, struct rte_flow_error *error,
		      struct enic_fm_counter **ctr)
{
	struct enic_fm_counter *c;
	int ret;

	ENICPMD_FUNC_TRACE();
	*ctr = NULL;
	if (SLIST_EMPTY(&fm->counters)) {
		ret = enic_fm_more_counters(fm);
		if (ret)
			return rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, "enic: out of counters");
	}
	c = SLIST_FIRST(&fm->counters);
	SLIST_REMOVE_HEAD(&fm->counters, next);
	*ctr = c;
	return 0;
}
static int
enic_fm_action_free(struct enic_flowman *fm, uint64_t handle)
{
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	args[0] = FM_ACTION_FREE;
	args[1] = handle;
	rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (rc)
		ENICPMD_LOG(ERR, "cannot free action: rc=%d handle=0x%" PRIx64,
			    rc, handle);
	return rc;
}

static int
enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle)
{
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	args[0] = FM_MATCH_ENTRY_REMOVE;
	args[1] = handle;
	rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (rc)
		ENICPMD_LOG(ERR, "cannot free match entry: rc=%d"
			    " handle=0x%" PRIx64, rc, handle);
	return rc;
}
static struct enic_fm_jump_flow *
find_jump_flow(struct enic_flowman *fm, uint32_t group)
{
	struct enic_fm_jump_flow *j;

	ENICPMD_FUNC_TRACE();
	TAILQ_FOREACH(j, &fm->jump_list, list) {
		if (j->group == group)
			return j;
	}
	return NULL;
}

static void
remove_jump_flow(struct enic_flowman *fm, struct rte_flow *flow)
{
	struct enic_fm_jump_flow *j;

	ENICPMD_FUNC_TRACE();
	TAILQ_FOREACH(j, &fm->jump_list, list) {
		if (j->flow == flow) {
			TAILQ_REMOVE(&fm->jump_list, j, list);
			free(j);
			return;
		}
	}
}

static int
save_jump_flow(struct enic_flowman *fm,
	       struct rte_flow *flow,
	       uint32_t group,
	       struct fm_tcam_match_entry *match,
	       struct fm_action *action)
{
	struct enic_fm_jump_flow *j;

	ENICPMD_FUNC_TRACE();
	j = calloc(1, sizeof(struct enic_fm_jump_flow));
	if (j == NULL)
		return -ENOMEM;
	j->flow = flow;
	j->group = group;
	j->match = *match;
	j->action = *action;
	TAILQ_INSERT_HEAD(&fm->jump_list, j, list);
	ENICPMD_LOG(DEBUG, "saved jump flow: flow=%p group=%u", flow, group);
	return 0;
}
static void
__enic_fm_flow_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
{
	if (fm_flow->entry_handle != FM_INVALID_HANDLE) {
		enic_fm_entry_free(fm, fm_flow->entry_handle);
		fm_flow->entry_handle = FM_INVALID_HANDLE;
	}
	if (fm_flow->action_handle != FM_INVALID_HANDLE) {
		enic_fm_action_free(fm, fm_flow->action_handle);
		fm_flow->action_handle = FM_INVALID_HANDLE;
	}
	enic_fm_counter_free(fm, fm_flow);
	if (fm_flow->fet) {
		enic_fet_put(fm, fm_flow->fet);
		fm_flow->fet = NULL;
	}
}

static void
enic_fm_flow_free(struct enic_flowman *fm, struct rte_flow *flow)
{
	if (flow->fm->fet && flow->fm->fet->default_key)
		remove_jump_flow(fm, flow);
	__enic_fm_flow_free(fm, flow->fm);
	free(flow->fm);
	free(flow);
}
static int
enic_fm_add_tcam_entry(struct enic_flowman *fm,
		       struct fm_tcam_match_entry *match_in,
		       uint64_t *entry_handle,
		       uint8_t ingress,
		       struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *ftm;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Copy entry to the command buffer */
	ftm = &fm->cmd.va->fm_tcam_match_entry;
	memcpy(ftm, match_in, sizeof(*ftm));
	/* Add TCAM entry */
	args[0] = FM_TCAM_ENTRY_INSTALL;
	args[1] = ingress ? fm->ig_tcam_hndl : fm->eg_tcam_hndl;
	args[2] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
	if (ret != 0) {
		ENICPMD_LOG(ERR, "cannot add %s TCAM entry: rc=%d",
			    ingress ? "ingress" : "egress", ret);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "enic: devcmd(tcam-entry-install)");
		return ret;
	}
	ENICPMD_LOG(DEBUG, "installed %s TCAM entry: handle=0x%" PRIx64,
		    ingress ? "ingress" : "egress", (uint64_t)args[0]);
	*entry_handle = args[0];
	return 0;
}
static int
enic_fm_add_exact_entry(struct enic_flowman *fm,
			struct fm_tcam_match_entry *match_in,
			uint64_t *entry_handle,
			struct enic_fm_fet *fet,
			struct rte_flow_error *error)
{
	struct fm_exact_match_entry *fem;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* The new entry must have the table's key */
	if (memcmp(fet->key.fk_hdrset, match_in->ftm_mask.fk_hdrset,
		   sizeof(struct fm_header_set) * FM_HDRSET_MAX)) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"enic: key does not match group's key");
	}

	/* Copy entry to the command buffer */
	fem = &fm->cmd.va->fm_exact_match_entry;
	/*
	 * Translate the TCAM entry to an exact entry. We only need to drop
	 * the position and mask: the mask is part of the exact match table,
	 * and position (aka priority) is not supported in the exact match
	 * table.
	 */
	fem->fem_data = match_in->ftm_data;
	fem->fem_flags = match_in->ftm_flags;
	fem->fem_action = match_in->ftm_action;
	fem->fem_counter = match_in->ftm_counter;

	/* Add exact entry */
	args[0] = FM_EXACT_ENTRY_INSTALL;
	args[1] = fet->handle;
	args[2] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
	if (ret != 0) {
		ENICPMD_LOG(ERR, "cannot add %s exact entry: group=%u",
			    fet->ingress ? "ingress" : "egress", fet->group);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "enic: devcmd(exact-entry-install)");
		return ret;
	}
	ENICPMD_LOG(DEBUG, "installed %s exact entry: group=%u"
		    " handle=0x%" PRIx64,
		    fet->ingress ? "ingress" : "egress", fet->group,
		    (uint64_t)args[0]);
	*entry_handle = args[0];
	return 0;
}
/* Push match-action to the NIC. */
static int
__enic_fm_flow_add_entry(struct enic_flowman *fm,
			 struct enic_fm_flow *fm_flow,
			 struct fm_tcam_match_entry *match_in,
			 struct fm_action *action_in,
			 uint32_t group,
			 uint8_t ingress,
			 struct rte_flow_error *error)
{
	struct enic_fm_counter *ctr;
	struct fm_action *fma;
	uint64_t action_h;
	uint64_t entry_h;
	uint64_t args[3];
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Allocate action. */
	fma = &fm->cmd.va->fm_action;
	memcpy(fma, action_in, sizeof(*fma));
	args[0] = FM_ACTION_ALLOC;
	args[1] = fm->cmd.pa;
	ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (ret != 0) {
		ENICPMD_LOG(ERR, "allocating TCAM table action rc=%d", ret);
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "enic: devcmd(action-alloc)");
		return ret;
	}
	action_h = args[0];
	fm_flow->action_handle = action_h;
	match_in->ftm_action = action_h;
	ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64, action_h);

	/* Allocate counter if requested. */
	if (match_in->ftm_flags & FMEF_COUNTER) {
		ret = enic_fm_counter_alloc(fm, error, &ctr);
		if (ret) /* error has been filled in */
			return ret;
		fm_flow->counter_valid = true;
		fm_flow->counter = ctr;
		match_in->ftm_counter = ctr->handle;
	}

	/*
	 * Get the group's table (either TCAM or exact match table) and
	 * add an entry to it. If we use the exact match table, the handler
	 * will translate the TCAM entry (match_in) to the appropriate
	 * exact match entry and use that instead.
	 */
	entry_h = FM_INVALID_HANDLE;
	if (group == FM_TCAM_RTE_GROUP) {
		ret = enic_fm_add_tcam_entry(fm, match_in, &entry_h, ingress,
					     error);
		if (ret)
			return ret;
		/* Jump action might have a ref to fet */
		fm_flow->fet = fm->fet;
		fm->fet = NULL;
	} else {
		struct enic_fm_fet *fet = NULL;

		ret = enic_fet_get(fm, group, ingress,
				   &match_in->ftm_mask, &fet, error);
		if (ret)
			return ret;
		fm_flow->fet = fet;
		ret = enic_fm_add_exact_entry(fm, match_in, &entry_h, fet,
					      error);
		if (ret)
			return ret;
	}
	/* Clear counter after adding entry, as clearing requires an in-use counter */
	if (fm_flow->counter_valid) {
		ret = enic_fm_counter_zero(fm, fm_flow->counter);
		if (ret)
			return ret;
	}
	fm_flow->entry_handle = entry_h;
	return 0;
}
/* Push match-action to the NIC. */
static struct rte_flow *
enic_fm_flow_add_entry(struct enic_flowman *fm,
		       struct fm_tcam_match_entry *match_in,
		       struct fm_action *action_in,
		       const struct rte_flow_attr *attrs,
		       struct rte_flow_error *error)
{
	struct enic_fm_flow *fm_flow;
	struct rte_flow *flow;

	ENICPMD_FUNC_TRACE();
	enic_fm_dump_tcam_entry(match_in, action_in, attrs->ingress);
	flow = calloc(1, sizeof(*flow));
	fm_flow = calloc(1, sizeof(*fm_flow));
	if (flow == NULL || fm_flow == NULL) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "enic: cannot allocate rte_flow");
		free(flow);
		free(fm_flow);
		return NULL;
	}
	flow->fm = fm_flow;
	fm_flow->action_handle = FM_INVALID_HANDLE;
	fm_flow->entry_handle = FM_INVALID_HANDLE;
	if (__enic_fm_flow_add_entry(fm, fm_flow, match_in, action_in,
				     attrs->group, attrs->ingress, error)) {
		enic_fm_flow_free(fm, flow);
		return NULL;
	}
	return flow;
}
static void
convert_jump_flows(struct enic_flowman *fm, struct enic_fm_fet *fet,
		   struct rte_flow_error *error)
{
	struct enic_fm_flow *fm_flow;
	struct enic_fm_jump_flow *j;
	struct fm_action *fma;
	uint32_t group;

	ENICPMD_FUNC_TRACE();
	/*
	 * Find the saved flows that should jump to the new table (fet).
	 * Then delete the old TCAM entry that jumps to the default table,
	 * and add a new one that jumps to the new table.
	 */
	group = fet->group;
	j = find_jump_flow(fm, group);
	while (j) {
		ENICPMD_LOG(DEBUG, "convert jump flow: flow=%p group=%u",
			    j->flow, group);
		/* Delete old entry */
		fm_flow = j->flow->fm;
		__enic_fm_flow_free(fm, fm_flow);

		/* Add new entry that points to the new table */
		fma = &j->action;
		fma->fma_action_ops[0].exact.handle = fet->handle;
		if (__enic_fm_flow_add_entry(fm, fm_flow, &j->match, fma,
					     FM_TCAM_RTE_GROUP, fet->ingress,
					     error)) {
			/* Cannot roll back changes at the moment */
			ENICPMD_LOG(ERR, "cannot convert jump flow: flow=%p",
				    j->flow);
		} else {
			fet->ref++;
			ENICPMD_LOG(DEBUG, "convert ok: group=%u ref=%u",
				    fet->group, fet->ref);
		}

		TAILQ_REMOVE(&fm->jump_list, j, list);
		free(j);
		j = find_jump_flow(fm, group);
	}
}
static void
enic_fm_open_scratch(struct enic_flowman *fm)
{
	fm->action_op_count = 0;
	fm->fet = NULL;
	memset(&fm->tcam_entry, 0, sizeof(fm->tcam_entry));
	memset(&fm->action, 0, sizeof(fm->action));
}

static void
enic_fm_close_scratch(struct enic_flowman *fm)
{
	if (fm->fet) {
		enic_fet_put(fm, fm->fet);
		fm->fet = NULL;
	}
	fm->action_op_count = 0;
}
static int
enic_fm_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *fm_tcam_entry;
	struct fm_action *fm_action;
	struct enic_flowman *fm;
	int ret;

	ENICPMD_FUNC_TRACE();
	fm = pmd_priv(dev)->fm;
	if (fm == NULL)
		return -ENOTSUP;
	enic_fm_open_scratch(fm);
	ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error);
	if (!ret) {
		fm_tcam_entry = &fm->tcam_entry;
		fm_action = &fm->action;
		enic_fm_dump_tcam_entry(fm_tcam_entry, fm_action,
					attrs->ingress);
	}
	enic_fm_close_scratch(fm);
	return ret;
}
static int
enic_fm_flow_query_count(struct rte_eth_dev *dev,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
	struct rte_flow_query_count *query;
	struct enic_fm_flow *fm_flow;
	struct enic *enic;
	uint64_t args[3];
	int rc;

	ENICPMD_FUNC_TRACE();
	enic = pmd_priv(dev);
	query = data;
	fm_flow = flow->fm;
	if (!fm_flow->counter_valid)
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"enic: flow does not have counter");

	args[0] = FM_COUNTER_QUERY;
	args[1] = fm_flow->counter->handle;
	args[2] = query->reset;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 3);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot query counter: rc=%d handle=0x%x",
			    rc, fm_flow->counter->handle);
		return rc;
	}
	query->hits_set = 1;
	query->hits = args[0];
	query->bytes_set = 1;
	query->bytes = args[1];
	return 0;
}
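/*
 * Illustrative sketch (not driver code): an application reads this
 * counter through the generic API, e.g.
 *
 *   struct rte_flow_query_count cnt = { .reset = 1 };
 *   const struct rte_flow_action act[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   if (rte_flow_query(port_id, flow, act, &cnt, &err) == 0)
 *           printf("hits=%" PRIu64 "\n", cnt.hits);
 */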
static int
enic_fm_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_fm_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
static struct rte_flow *
enic_fm_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct fm_tcam_match_entry *fm_tcam_entry;
	struct fm_action *fm_action;
	struct enic_flowman *fm;
	struct enic_fm_fet *fet;
	struct rte_flow *flow;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	enic = pmd_priv(dev);
	fm = enic->fm;
	if (fm == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "flowman is not initialized");
		return NULL;
	}
	enic_fm_open_scratch(fm);
	flow = NULL;
	ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error);
	if (ret < 0)
		goto error_with_scratch;
	fm_tcam_entry = &fm->tcam_entry;
	fm_action = &fm->action;
	flow = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
				      attrs, error);
	if (flow) {
		LIST_INSERT_HEAD(&enic->flows, flow, next);
		fet = flow->fm->fet;
		if (fet && fet->default_key) {
			/*
			 * Jump to non-existent group? Save the relevant info
			 * so we can convert this flow when that group
			 * materializes.
			 */
			save_jump_flow(fm, flow, fet->group,
				       fm_tcam_entry, fm_action);
		} else if (fet && fet->ref == 1) {
			/*
			 * A new table is created. Convert the saved flows
			 * that should jump to this group.
			 */
			convert_jump_flows(fm, fet, error);
		}
	}

error_with_scratch:
	enic_fm_close_scratch(fm);
	return flow;
}
static int
enic_fm_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     __rte_unused struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (enic->fm == NULL)
		return 0;
	LIST_REMOVE(flow, next);
	enic_fm_flow_free(enic->fm, flow);
	return 0;
}
static int
enic_fm_flow_flush(struct rte_eth_dev *dev,
		   __rte_unused struct rte_flow_error *error)
{
	struct enic_fm_flow *fm_flow;
	struct enic_flowman *fm;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (enic->fm == NULL)
		return 0;
	fm = enic->fm;
	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		fm_flow = flow->fm;
		LIST_REMOVE(flow, next);
		/*
		 * If tables are null, then vNIC is closing, and the firmware
		 * has already cleaned up flowman state. So do not try to free
		 * resources, as it only causes errors.
		 */
		if (fm->ig_tcam_hndl == FM_INVALID_HANDLE) {
			fm_flow->entry_handle = FM_INVALID_HANDLE;
			fm_flow->action_handle = FM_INVALID_HANDLE;
			fm_flow->fet = NULL;
		}
		enic_fm_flow_free(fm, flow);
	}
	return 0;
}
static int
enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle)
{
	uint64_t args[2];
	int rc;

	args[0] = FM_MATCH_TABLE_FREE;
	args[1] = handle;
	rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
	if (rc)
		ENICPMD_LOG(ERR, "cannot free table: rc=%d handle=0x%" PRIx64,
			    rc, handle);
	return rc;
}
static int
enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction,
		       uint32_t max_entries, uint64_t *handle)
{
	struct fm_tcam_match_table *tcam_tbl;
	struct enic *enic;
	uint64_t args[2];
	int rc;

	ENICPMD_FUNC_TRACE();
	enic = fm->enic;
	tcam_tbl = &fm->cmd.va->fm_tcam_match_table;
	tcam_tbl->ftt_direction = direction;
	tcam_tbl->ftt_stage = FM_STAGE_LAST;
	tcam_tbl->ftt_max_entries = max_entries;
	args[0] = FM_TCAM_TABLE_ALLOC;
	args[1] = fm->cmd.pa;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc %s TCAM table: rc=%d",
			    (direction == FM_INGRESS) ? "IG" : "EG", rc);
		return rc;
	}
	*handle = args[0];
	ENICPMD_LOG(DEBUG, "%s TCAM table allocated, handle=0x%" PRIx64,
		    (direction == FM_INGRESS) ? "IG" : "EG", *handle);
	return 0;
}
static int
enic_fm_init_counters(struct enic_flowman *fm)
{
	ENICPMD_FUNC_TRACE();
	SLIST_INIT(&fm->counters);
	return enic_fm_more_counters(fm);
}

static void
enic_fm_free_all_counters(struct enic_flowman *fm)
{
	struct enic *enic;
	uint64_t args[2];
	int rc;

	enic = fm->enic;
	args[0] = FM_COUNTER_BRK;
	args[1] = 0;
	rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
	if (rc != 0)
		ENICPMD_LOG(ERR, "cannot free counters: rc=%d", rc);
	rte_free(fm->counter_stack);
}
static int
enic_fm_alloc_tcam_tables(struct enic_flowman *fm)
{
	int rc;

	ENICPMD_FUNC_TRACE();
	rc = enic_fm_tcam_tbl_alloc(fm, FM_INGRESS, FM_MAX_TCAM_TABLE_SIZE,
				    &fm->ig_tcam_hndl);
	if (rc)
		return rc;
	rc = enic_fm_tcam_tbl_alloc(fm, FM_EGRESS, FM_MAX_TCAM_TABLE_SIZE,
				    &fm->eg_tcam_hndl);
	return rc;
}

static void
enic_fm_free_tcam_tables(struct enic_flowman *fm)
{
	ENICPMD_FUNC_TRACE();
	if (fm->ig_tcam_hndl) {
		ENICPMD_LOG(DEBUG, "free IG TCAM table handle=0x%" PRIx64,
			    fm->ig_tcam_hndl);
		enic_fm_tbl_free(fm, fm->ig_tcam_hndl);
		fm->ig_tcam_hndl = FM_INVALID_HANDLE;
	}
	if (fm->eg_tcam_hndl) {
		ENICPMD_LOG(DEBUG, "free EG TCAM table handle=0x%" PRIx64,
			    fm->eg_tcam_hndl);
		enic_fm_tbl_free(fm, fm->eg_tcam_hndl);
		fm->eg_tcam_hndl = FM_INVALID_HANDLE;
	}
}
int
enic_fm_init(struct enic *enic)
{
	struct enic_flowman *fm;
	uint8_t name[RTE_MEMZONE_NAMESIZE];
	int rc;

	if (enic->flow_filter_mode != FILTER_FLOWMAN)
		return 0;
	ENICPMD_FUNC_TRACE();
	fm = calloc(1, sizeof(*fm));
	if (fm == NULL) {
		ENICPMD_LOG(ERR, "cannot alloc flowman struct");
		return -ENOMEM;
	}
	fm->enic = enic;
	TAILQ_INIT(&fm->fet_list);
	TAILQ_INIT(&fm->jump_list);
	/* Allocate host memory for flowman commands */
	snprintf((char *)name, sizeof(name), "fm-cmd-%s", enic->bdf_name);
	fm->cmd.va = enic_alloc_consistent(enic,
		sizeof(union enic_flowman_cmd_mem), &fm->cmd.pa, name);
	if (fm->cmd.va == NULL) {
		ENICPMD_LOG(ERR, "cannot allocate flowman command memory");
		rc = -ENOMEM;
		goto error_fm;
	}
	/* Allocate TCAM tables upfront as they are the main tables */
	rc = enic_fm_alloc_tcam_tables(fm);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc TCAM tables");
		goto error_cmd;
	}
	/* Then a number of counters */
	rc = enic_fm_init_counters(fm);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc counters");
		goto error_tables;
	}
	/*
	 * One default exact match table for each direction. We hold onto
	 * them until destroy.
	 */
	rc = enic_fet_alloc(fm, 1, NULL, 128, &fm->default_ig_fet);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc default IG exact match table");
		goto error_counters;
	}
	fm->default_ig_fet->ref = 1;
	rc = enic_fet_alloc(fm, 0, NULL, 128, &fm->default_eg_fet);
	if (rc) {
		ENICPMD_LOG(ERR, "cannot alloc default EG exact match table");
		goto error_ig_fet;
	}
	fm->default_eg_fet->ref = 1;
	enic->fm = fm;
	return 0;

error_ig_fet:
	enic_fet_free(fm, fm->default_ig_fet);
error_counters:
	enic_fm_free_all_counters(fm);
error_tables:
	enic_fm_free_tcam_tables(fm);
error_cmd:
	enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
			     fm->cmd.va, fm->cmd.pa);
error_fm:
	free(fm);
	return rc;
}
void
enic_fm_destroy(struct enic *enic)
{
	struct enic_flowman *fm;
	struct enic_fm_fet *fet;

	if (enic->fm == NULL)
		return;
	ENICPMD_FUNC_TRACE();
	fm = enic->fm;
	enic_fet_free(fm, fm->default_eg_fet);
	enic_fet_free(fm, fm->default_ig_fet);
	/* Free all exact match tables still open */
	while (!TAILQ_EMPTY(&fm->fet_list)) {
		fet = TAILQ_FIRST(&fm->fet_list);
		enic_fet_free(fm, fet);
	}
	enic_fm_free_tcam_tables(fm);
	enic_fm_free_all_counters(fm);
	enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
			     fm->cmd.va, fm->cmd.pa);
	free(fm);
	enic->fm = NULL;
}
const struct rte_flow_ops enic_fm_flow_ops = {
	.validate = enic_fm_flow_validate,
	.create = enic_fm_flow_create,
	.destroy = enic_fm_flow_destroy,
	.flush = enic_fm_flow_flush,
	.query = enic_fm_flow_query,
};