7e31f81f13fa27cbc2d69ddc9c7e4ed0da6b64e3
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_tun.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #ifndef _BNXT_TUN_H_
7 #define _BNXT_TUN_H_
8
9 #include <inttypes.h>
10 #include <stdbool.h>
11 #include <sys/queue.h>
12
13 #include "rte_ethdev.h"
14
15 #include "ulp_template_db_enum.h"
16 #include "ulp_template_struct.h"
17
/* True for an outer tunnel flow: an L3 tunnel flow whose action list
 * carries a JUMP action (the jump chains to the inner flow lookup).
 */
#define BNXT_OUTER_TUN_FLOW(l3_tun, params)             \
	((l3_tun) &&                                    \
	 ULP_BITMAP_ISSET((params)->act_bitmap.bits,    \
			  BNXT_ULP_ACTION_BIT_JUMP))
/* True for an inner tunnel flow signature: an L3 tunnel decap flow whose
 * match pattern does not include the outer Ethernet header (O_ETH clear).
 */
#define BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params)          \
	((l3_tun) && (l3_tun_decap) &&                                  \
	 !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits,                   \
			   BNXT_ULP_HDR_BIT_O_ETH))
26
/* Inner flow arrived before the outer flow was offloaded (state still
 * NORMAL): the inner flow must be cached until the outer flow shows up.
 */
#define BNXT_CACHE_INNER_TUN_FLOW(state, inner_tun_sig) \
	((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
/* Outer flow already offloaded (state TUN_O_OFFLD): the inner flow can
 * be processed immediately.
 */
#define BNXT_INNER_TUN_FLOW(state, inner_tun_sig)               \
	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))
31
/* It is invalid to get another outer flow offload request
 * for the same tunnel, while the outer flow is already offloaded.
 */
#define BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig)        \
	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))
37
/* Indices of the tunnel outer DMAC and outer IPv4/IPv6 destination-IP
 * entries in the parser's header field array.
 * NOTE(review): these look like offsets into the ULP template header
 * field layout — confirm against the template definitions before
 * changing, as they are not derived from any constant visible here.
 */
#define ULP_TUN_O_DMAC_HDR_FIELD_INDEX  1
#define ULP_TUN_O_IPV4_DIP_INDEX        19
#define ULP_TUN_O_IPV6_DIP_INDEX        17
41
/* When a flow offload request comes the following state transitions
 * happen based on the order in which the outer & inner flow offload
 * requests arrive.
 *
 * If inner tunnel flow offload request arrives first then the flow
 * state will remain in BNXT_ULP_FLOW_STATE_NORMAL state.
 * The following outer tunnel flow offload request will change the
 * state of the flow to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from
 * BNXT_ULP_FLOW_STATE_NORMAL.
 *
 * If outer tunnel flow offload request arrives first then the flow state
 * will change from BNXT_ULP_FLOW_STATE_NORMAL to
 * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 *
 * Once the flow state is in BNXT_ULP_FLOW_STATE_TUN_O_OFFLD, any inner
 * tunnel flow offload requests after that point will be treated as a
 * normal flow and the tunnel flow state remains in
 * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD
 */
enum bnxt_ulp_tun_flow_state {
	/* Outer tunnel flow not yet offloaded; inner flows are cached. */
	BNXT_ULP_FLOW_STATE_NORMAL = 0,
	/* Outer tunnel flow offloaded; inner flows proceed normally. */
	BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
};
65
/* Per-port tunnel flow bookkeeping (one entry per ethdev port). */
struct ulp_per_port_flow_info {
	/* Tunnel offload state for this port (see state transitions above). */
	enum bnxt_ulp_tun_flow_state            state;
	/* Count of inner tunnel flows — presumably the number of entries
	 * queued on tun_i_prms_list; verify against the .c implementation.
	 */
	uint32_t                                tun_i_cnt;
	/* Parser params of inner tunnel flows cached while the outer flow
	 * has not been offloaded yet (state NORMAL).
	 */
	STAILQ_HEAD(, ulp_rte_parser_params)    tun_i_prms_list;
};
71
/* Cached state for one tunnel, identified by its outer destination IP. */
struct bnxt_tun_cache_entry {
	/* True once t_dst_ip/t_dst_ip6 (and t_dmac) hold a valid tunnel key. */
	bool                            t_dst_ip_valid;
	/* Tunnel outer destination MAC address. */
	uint8_t                         t_dmac[RTE_ETHER_ADDR_LEN];
	/* Tunnel outer destination IP: IPv4 (big endian) or IPv6. */
	union {
		rte_be32_t              t_dst_ip;
		uint8_t                 t_dst_ip6[16];
	};
	/* Flow id of the offloaded outer tunnel flow. */
	uint32_t                        outer_tun_flow_id;
	/* Number of rejected duplicate outer-flow offload requests
	 * (see BNXT_REJECT_OUTER_TUN_FLOW).
	 */
	uint16_t                        outer_tun_rej_cnt;
	/* Per-port state/cached-inner-flow info for this tunnel. */
	struct ulp_per_port_flow_info   tun_flow_info[RTE_MAX_ETHPORTS];
};
83
/* Initialize the tunnel cache table (implementation not visible here —
 * presumably resets all entries and their inner-flow lists).
 */
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl);

/* Clear the tunnel cache entry at index tun_idx in tun_tbl. */
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);

/* Clear cached inner tunnel flow state associated with flow id fid —
 * NOTE(review): exact matching semantics live in the .c file; confirm.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid);
92
93 #endif