drivers/net/bnxt/tf_ulp/ulp_tun.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_TUN_H_
#define _BNXT_TUN_H_

#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include "rte_version.h"
#include "rte_ethdev.h"

#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"

/* Older DPDK (17.11.x) spells the Ethernet address length macro without
 * the RTE_ prefix; map it here for compatibility.
 */
#if RTE_VERSION_NUM(17, 11, 10, 16) == RTE_VERSION
#define RTE_ETHER_ADDR_LEN      ETHER_ADDR_LEN
#endif

/* Outer tunnel flow: an L3 tunnel flow whose action list carries a JUMP. */
#define BNXT_OUTER_TUN_FLOW(l3_tun, params)             \
        ((l3_tun) &&                                    \
         ULP_BITMAP_ISSET((params)->act_bitmap.bits,    \
                          BNXT_ULP_ACT_BIT_JUMP))
/* Inner tunnel flow signature: an L3 tunnel decap flow that does not
 * match on an outer Ethernet header.
 */
#define BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params)          \
        ((l3_tun) && (l3_tun_decap) &&                                  \
         !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits,                   \
                           BNXT_ULP_HDR_BIT_O_ETH))

/* Inner flow seen before the outer flow is offloaded: cache it. */
#define BNXT_CACHE_INNER_TUN_FLOW(state, inner_tun_sig) \
        ((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
/* Inner flow seen after the outer flow is offloaded: treat it as normal. */
#define BNXT_INNER_TUN_FLOW(state, inner_tun_sig)               \
        ((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))

/* It is invalid to receive another outer flow offload request for the
 * same tunnel while the outer flow is already offloaded.
 */
#define BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig)        \
        ((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))

/* Header field list indexes of the outer tunnel DMAC and destination IP. */
#define ULP_TUN_O_DMAC_HDR_FIELD_INDEX  1
#define ULP_TUN_O_IPV4_DIP_INDEX        19
#define ULP_TUN_O_IPV6_DIP_INDEX        17

/* When a flow offload request arrives, the following state transitions
 * occur based on the order in which the outer and inner flow offload
 * requests arrive.
 *
 * If the inner tunnel flow offload request arrives first, the flow
 * state remains BNXT_ULP_FLOW_STATE_NORMAL. The subsequent outer
 * tunnel flow offload request then changes the state of the flow from
 * BNXT_ULP_FLOW_STATE_NORMAL to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 *
 * If the outer tunnel flow offload request arrives first, the flow
 * state changes from BNXT_ULP_FLOW_STATE_NORMAL to
 * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 *
 * Once the flow state is BNXT_ULP_FLOW_STATE_TUN_O_OFFLD, any inner
 * tunnel flow offload request after that point is treated as a normal
 * flow and the tunnel flow state remains BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 * (See the illustrative sketch after the structure definitions below.)
 */
enum bnxt_ulp_tun_flow_state {
        BNXT_ULP_FLOW_STATE_NORMAL = 0,
        BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
};

/* Per-port tunnel flow state and the inner flows cached while waiting
 * for the outer flow to be offloaded.
 */
struct ulp_per_port_flow_info {
        enum bnxt_ulp_tun_flow_state            state;
        /* Number of inner tunnel flows currently cached. */
        uint32_t                                tun_i_cnt;
        /* Parser params of the cached inner tunnel flows. */
        STAILQ_HEAD(, ulp_rte_parser_params)    tun_i_prms_list;
};

struct bnxt_tun_cache_entry {
        /* Set once the tunnel destination IP below is known. */
        bool                            t_dst_ip_valid;
        /* Outer (tunnel) destination MAC. */
        uint8_t                         t_dmac[RTE_ETHER_ADDR_LEN];
        /* Outer (tunnel) destination IP, IPv4 or IPv6. */
        union {
                rte_be32_t              t_dst_ip;
                uint8_t                 t_dst_ip6[16];
        };
        /* Flow id of the offloaded outer tunnel flow. */
        uint32_t                        outer_tun_flow_id;
        /* Number of outer flow requests rejected for this tunnel. */
        uint16_t                        outer_tun_rej_cnt;
        /* Per-port flow state for this tunnel. */
        struct ulp_per_port_flow_info   tun_flow_info[RTE_MAX_ETHPORTS];
};
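
/* Illustrative sketch only, not part of the driver: one possible way a
 * parser could drive the per-port tunnel state machine documented above,
 * as it might look in ulp_tun.c where ULP_BITMAP_ISSET and the parser
 * types are fully visible. The helper name, the l3_tun/l3_tun_decap
 * inputs, the return convention and the 'next' STAILQ link assumed in
 * struct ulp_rte_parser_params are all hypothetical.
 */
static inline int
ulp_tun_flow_classify_sketch(struct ulp_per_port_flow_info *flow_info,
                             struct ulp_rte_parser_params *params,
                             bool l3_tun, bool l3_tun_decap)
{
        bool outer_sig = BNXT_OUTER_TUN_FLOW(l3_tun, params);
        bool inner_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap,
                                                  params);

        /* A second outer flow for an already offloaded tunnel is invalid. */
        if (BNXT_REJECT_OUTER_TUN_FLOW(flow_info->state, outer_sig))
                return -1;

        if (outer_sig) {
                /* First outer flow: NORMAL -> TUN_O_OFFLD. */
                flow_info->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
        } else if (BNXT_CACHE_INNER_TUN_FLOW(flow_info->state, inner_sig)) {
                /* Inner flow arrived before the outer flow: cache it until
                 * the outer flow is offloaded (assumes a STAILQ_ENTRY field
                 * named 'next' in struct ulp_rte_parser_params).
                 */
                STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, params, next);
                flow_info->tun_i_cnt++;
        } else if (BNXT_INNER_TUN_FLOW(flow_info->state, inner_sig)) {
                /* Outer flow already offloaded: program the inner flow as a
                 * normal flow.
                 */
        }
        return 0;
}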

/* Initialize the tunnel cache table. */
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl);

/* Clear the tunnel cache entry at the given index. */
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);

/* Clear the inner tunnel flow information associated with the given
 * flow id.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid);

#endif /* _BNXT_TUN_H_ */
98 #endif