net/bnxt: add Truflow flush-timer to alloc table scope
[dpdk.git] / drivers / net / bnxt / bnxt_rxr.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #ifndef _BNXT_RXR_H_
7 #define _BNXT_RXR_H_
8 #include "hsi_struct_def_dpdk.h"
9
/*
 * Ring the Rx doorbell: write the producer index, tagged with the Rx
 * doorbell key, through the doorbell pointer 'db'.
 * 'db' is now parenthesized so any pointer expression can be passed
 * without being split by the cast's precedence.
 */
#define B_RX_DB(db, prod)						\
		(*(uint32_t *)(db) = (DB_KEY_RX | (prod)))
12
/*
 * Extract the TPA L4 header size hint from the hdr_info word.
 * Bits 31:27 hold the size when non-zero; zero means "not reported",
 * for which 32 is returned.
 *
 * The previous expansion was a bare compound statement { ... }, which is
 * not an expression and therefore failed to compile wherever the macro's
 * value was used.  Use a GNU statement expression ({ ... }) instead, so
 * the macro yields a value while still evaluating 'x' exactly once.
 */
#define BNXT_TPA_L4_SIZE(x)	\
	({ \
		__typeof__(x) hdr_info = (x); \
		(hdr_info & 0xf8000000) ? (hdr_info >> 27) : 32; \
	})
18
/* hdr_info bit layout: [26:18] inner L3 offset, [17:9] inner L2 offset,
 * [8:0] outer L3 offset (each a 9-bit byte offset into the packet).
 */
#define BNXT_TPA_INNER_L3_OFF(hdr)	(((hdr) >> 18) & 0x1ff)

#define BNXT_TPA_INNER_L2_OFF(hdr)	(((hdr) >> 9) & 0x1ff)

#define BNXT_TPA_OUTER_L3_OFF(hdr)	((hdr) & 0x1ff)
27
/* Low nibble of the completion's flags2 word (the checksum-calc bits). */
#define flags2_0xf(cmpl)	\
	((cmpl)->flags2 & 0xf)
30
/* IP non-tunnel packets may or may not carry an L4 header:
 * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP, or
 * Ether / (vlan) / IP|IP6 / ICMP
 * '==' is used instead of '&' because tunnel packets have all four
 * checksum-calc bits set and must not match this test.
 */
#define IS_IP_NONTUNNEL_PKT(flags2_f)	\
	(	\
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \
	)
44
/* An IP tunnel packet must have at least the tunnel (outer) IP checksum
 * calculated.  The outer L4 checksum is optional because some
 * encapsulations have no outer L4 header:
 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 * Also note: inner L3 checksum errors are not reported via DPDK offload
 * flags.
 */
#define IS_IP_TUNNEL_PKT(flags2_f)	\
	((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))
60
/* RX_PKT_CMPL_ERRORS_IP_CS_ERROR applies only to non-tunnel packets.
 * For tunnel packets that bit is not accounted for and the packet is
 * treated as having a good checksum.
 */
#define RX_CMP_IP_CS_ERROR(rxcmp1)	\
	((rxcmp1)->errors_v2 &	\
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR))

/* Outer (tunnel) IP header checksum error. */
#define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1)	\
	((rxcmp1)->errors_v2 &	\
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))

/* Mask of the "IP checksum was calculated" bits (plain and tunnel). */
#define RX_CMP_IP_CS_BITS	\
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
76
/* True when the NIC computed neither the plain nor the tunnel IP checksum,
 * i.e. the IP checksum status is unknown.  The whole expansion is
 * parenthesized so the macro composes safely inside larger expressions
 * (the original left the '!' expression bare).
 */
#define RX_CMP_IP_CS_UNKNOWN(rxcmp1)	\
		(!((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS))
79
/* L4 non-tunnel packet:
 * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP
 * Exactly the plain IP and L4 checksum-calc bits are set; '==' keeps
 * tunnel packets (which also set the T_* bits) from matching.
 */
#define IS_L4_NONTUNNEL_PKT(flags2_f)	\
	( \
	  ((flags2_f) == \
	   (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |	\
			     RX_PKT_CMPL_FLAGS2_L4_CS_CALC))))
88
/* L4 tunnel packet -
 * The outer L4 checksum is not mandatory (e.g. GRE has no outer L4):
 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 */
/* All four checksum-calc bits set: inner and outer, L3 and L4. */
#define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f)	\
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |	\
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC |	\
			    RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |	\
			    RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)))

/* Same but without the outer L4 bit: tunnel with no outer L4 header. */
#define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f)	\
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |	\
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC |	\
			    RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)))

/* Tunnel packet whose inner L4 checksum was calculated, with or without
 * an outer L4 checksum.
 */
#define IS_L4_TUNNEL_PKT(flags2_f)	\
	(	\
		IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \
		IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f)	\
	)
113
/* Aggregation ID from a TPA-start completion, pre-Thor field layout
 * (masked and shifted out of the agg_id word).
 */
#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
	((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_START_CMPL_AGG_ID_SFT)

/* Aggregation ID from a TPA-start completion, Thor layout (whole field). */
#define BNXT_TPA_START_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)
120
/*
 * Return the aggregation ID from a TPA-start completion, decoding the
 * field with the chip-appropriate layout (Thor vs. pre-Thor).
 */
static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
					     struct rx_tpa_start_cmpl *cmp)
{
	return BNXT_CHIP_THOR(bp) ? BNXT_TPA_START_AGG_ID_TH(cmp) :
				    BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
}
129
/* Number of aggregation buffers in a TPA-end completion, pre-Thor layout. */
#define BNXT_TPA_END_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \
	 >> RX_TPA_END_CMPL_AGG_BUFS_SFT)

/* Number of aggregation buffers in a TPA-end completion, Thor layout. */
#define BNXT_TPA_END_AGG_BUFS_TH(cmp) \
	((cmp)->tpa_agg_bufs)

/* Aggregation ID from a TPA-end completion, pre-Thor layout. */
#define BNXT_TPA_END_AGG_ID(cmp) \
	(((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_END_CMPL_AGG_ID_SFT)

/* Aggregation ID from a TPA-end completion, Thor layout. */
#define BNXT_TPA_END_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)
143
/* "Non-tunnel L4 checksum was calculated" bit. */
#define RX_CMP_L4_CS_BITS	\
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)

/* True when the NIC did not compute the (non-tunnel) L4 checksum.
 * The whole expansion is parenthesized so the macro composes safely
 * (the original left the '!' expression bare).
 */
#define RX_CMP_L4_CS_UNKNOWN(rxcmp1)					\
	    (!((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS))

/* "Tunnel (outer) L4 checksum was calculated" bit. */
#define RX_CMP_T_L4_CS_BITS	\
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)

/* True when the NIC did not compute the tunnel L4 checksum. */
#define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1)					\
	    (!((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS))
155
/* Outer (tunnel) L4 checksum error. */
#define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \
	 ((rxcmp1)->errors_v2 & \
	  rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))

/* Inner L4 checksum error. */
#define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \
	 ((rxcmp1)->errors_v2 & \
	  rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
167
/* Batch threshold for posting Rx buffers back to the ring
 * (presumably checked in the Rx burst path -- confirm in bnxt_rxr.c).
 */
#define BNXT_RX_POST_THRESH	32

/* Coverage of the reported RSS hash input. */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};
176
/* State of one in-progress TPA (receive aggregation) flow. */
struct bnxt_tpa_info {
	struct rte_mbuf			*mbuf;	/* mbuf being aggregated into */
	uint16_t			len;
	uint32_t			agg_count;	/* entries used in agg_arr -- per field names */
	/* Buffered v2 aggregation-buffer completions for this flow. */
	struct rx_tpa_v2_abuf_cmpl	agg_arr[TPA_MAX_NUM_SEGS];
};
183
/* Software shadow of one hardware Rx buffer descriptor. */
struct bnxt_sw_rx_bd {
	struct rte_mbuf		*mbuf; /* data associated with RX descriptor */
};
187
/* Per-queue Rx state: the main Rx ring plus the aggregation (ag) ring
 * used for scattered/TPA buffers, with their doorbells and TPA state.
 */
struct bnxt_rx_ring_info {
	uint16_t		rx_prod;	/* Rx ring producer index */
	uint16_t		ag_prod;	/* aggregation ring producer index */
	struct bnxt_db_info	rx_db;		/* Rx ring doorbell */
	struct bnxt_db_info	ag_db;		/* aggregation ring doorbell */

	struct rx_prod_pkt_bd	*rx_desc_ring;	/* hardware descriptors */
	struct rx_prod_pkt_bd	*ag_desc_ring;
	struct bnxt_sw_rx_bd	*rx_buf_ring; /* sw ring */
	struct bnxt_sw_rx_bd	*ag_buf_ring; /* sw ring */

	rte_iova_t		rx_desc_mapping;	/* IOVA of rx_desc_ring */
	rte_iova_t		ag_desc_mapping;	/* IOVA of ag_desc_ring */

	struct bnxt_ring	*rx_ring_struct;
	struct bnxt_ring	*ag_ring_struct;

	/*
	 * To deal with out of order return from TPA, use free buffer indicator
	 */
	struct rte_bitmap	*ag_bitmap;

	struct bnxt_tpa_info *tpa_info;	/* one entry per TPA aggregation */
};
212
/* Scalar Rx burst handler. */
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts);
/* No-op Rx handler (returns no packets); presumably installed while a
 * queue is stopped -- confirm against its definition in bnxt_rxr.c.
 */
uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

#ifdef RTE_ARCH_X86
/* Vectorized Rx burst path, built on x86 only. */
uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif

/* Translate the completion's CFA code/metadata into the mbuf mark. */
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf);
232
/* Bit layout of the Rx completion's CFA code / metadata words used when
 * building the flow mark (see bnxt_set_mark_in_mbuf).
 */
#define BNXT_RX_META_CFA_CODE_SHIFT		19
#define BNXT_CFA_CODE_META_SHIFT		16
#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT	0x8000000
#define BNXT_RX_META_CFA_CODE_EEM_BIT		0x4000000
#define BNXT_CFA_META_FMT_MASK			0x70
#define BNXT_CFA_META_FMT_SHFT			4
#define BNXT_CFA_META_FMT_EM_EEM_SHFT		1
#define BNXT_CFA_META_FMT_EEM			3
241
242 #endif