ethdev: promote sibling iterators to stable
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_tun.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #ifndef _BNXT_TUN_H_
7 #define _BNXT_TUN_H_
8
9 #include <inttypes.h>
10 #include <stdbool.h>
11 #include <sys/queue.h>
12
13 #include "rte_version.h"
14 #include "rte_ethdev.h"
15
16 #include "ulp_template_db_enum.h"
17 #include "ulp_template_struct.h"
18
/* Outer tunnel flow: an L3 tunnel header was matched and the action
 * bitmap requests a JUMP (to the inner flow).
 */
#define BNXT_OUTER_TUN_FLOW(l3_tun, params)             \
	((l3_tun) &&                                    \
	 ULP_BITMAP_ISSET((params)->act_bitmap.bits,    \
			  BNXT_ULP_ACT_BIT_JUMP))
/* Inner tunnel flow signature: L3 tunnel with decap requested and no
 * outer Ethernet header present in the parsed header bitmap.
 */
#define BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params)          \
	((l3_tun) && (l3_tun_decap) &&                                  \
	 !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits,                   \
			   BNXT_ULP_HDR_BIT_O_ETH))

/* Inner tunnel flow seen while the flow state is still NORMAL, i.e.
 * before the matching outer flow is offloaded: cache it for later.
 */
#define BNXT_CACHE_INNER_TUN_FLOW(state, inner_tun_sig) \
	((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
/* Inner tunnel flow whose outer flow is already offloaded
 * (state TUN_O_OFFLD): it can be processed immediately.
 */
#define BNXT_INNER_TUN_FLOW(state, inner_tun_sig)               \
	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))

/* It is invalid to get another outer flow offload request
 * for the same tunnel, while the outer flow is already offloaded.
 */
#define BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig)        \
	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))

/* Indexes of the outer DMAC and outer destination IP fields in the
 * parser's header-field array.
 * NOTE(review): values presumably mirror the field order generated in
 * the ulp template database — confirm if templates change.
 */
#define ULP_TUN_O_DMAC_HDR_FIELD_INDEX  1
#define ULP_TUN_O_IPV4_DIP_INDEX        19
#define ULP_TUN_O_IPV6_DIP_INDEX        17
/* When a flow offload request comes the following state transitions
 * happen based on the order in which the outer & inner flow offload
 * requests arrive.
 *
 * If inner tunnel flow offload request arrives first then the flow
 * state will remain in BNXT_ULP_FLOW_STATE_NORMAL state.
 * The following outer tunnel flow offload request will change the
 * state of the flow to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from
 * BNXT_ULP_FLOW_STATE_NORMAL.
 *
 * If outer tunnel flow offload request arrives first then the flow state
 * will change from BNXT_ULP_FLOW_STATE_NORMAL to
 * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 *
 * Once the flow state is in BNXT_ULP_FLOW_STATE_TUN_O_OFFLD, any inner
 * tunnel flow offload requests after that point will be treated as a
 * normal flow and the tunnel flow state remains in
 * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD
 */
enum bnxt_ulp_tun_flow_state {
	/* Initial state: outer tunnel flow not yet offloaded. */
	BNXT_ULP_FLOW_STATE_NORMAL = 0,
	/* Outer tunnel flow has been offloaded. */
	BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
};
66
/* Per-port tunnel flow bookkeeping for one cached tunnel. */
struct ulp_per_port_flow_info {
	/* Current position in the outer/inner offload state machine. */
	enum bnxt_ulp_tun_flow_state            state;
	/* Count of inner tunnel flows tracked for this port. */
	uint32_t                                tun_i_cnt;
	/* Parser params of inner tunnel flows queued while the outer
	 * flow is not yet offloaded (see BNXT_CACHE_INNER_TUN_FLOW).
	 */
	STAILQ_HEAD(, ulp_rte_parser_params)    tun_i_prms_list;
};
72
/* Cache entry describing one tunnel and its per-port flow state. */
struct bnxt_tun_cache_entry {
	/* True once the tunnel destination IP below has been recorded. */
	bool                            t_dst_ip_valid;
	/* Tunnel outer destination MAC. */
	uint8_t                         t_dmac[RTE_ETHER_ADDR_LEN];
	/* Tunnel outer destination IP; v4 and v6 forms share storage,
	 * discriminated by the flow's header bitmap elsewhere —
	 * NOTE(review): discriminator not visible in this header, confirm.
	 */
	union {
		rte_be32_t              t_dst_ip;	/* big-endian IPv4 */
		uint8_t                 t_dst_ip6[16];	/* IPv6, network order */
	};
	/* Flow id assigned to the offloaded outer tunnel flow. */
	uint32_t                        outer_tun_flow_id;
	/* Number of duplicate outer flow requests rejected while in
	 * TUN_O_OFFLD state (see BNXT_REJECT_OUTER_TUN_FLOW).
	 */
	uint16_t                        outer_tun_rej_cnt;
	/* Per-ethdev-port flow state, indexed by port id. */
	struct ulp_per_port_flow_info   tun_flow_info[RTE_MAX_ETHPORTS];
};
84
/* Initialize the tunnel cache table to its empty state.
 * @param tun_tbl  table of tunnel cache entries to initialize
 */
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl);

/* Clear a single tunnel cache entry.
 * @param tun_tbl  table of tunnel cache entries
 * @param tun_idx  index of the entry to clear
 */
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);

/* Remove the cached inner-flow state associated with a flow id.
 * @param tun_tbl  table of tunnel cache entries
 * @param fid      flow id whose inner tunnel entry is cleared
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid);
93
94 #endif