net/bnxt: prevent device access when device is in reset
[dpdk.git] / drivers / net / bnxt / bnxt_rxr.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #ifndef _BNXT_RXR_H_
7 #define _BNXT_RXR_H_
8
/*
 * Ring the Rx doorbell: store the new Rx producer index, tagged with
 * the Rx doorbell key, through the doorbell pointer "db".
 * NOTE(review): plain non-volatile 32-bit store — presumably any
 * required write ordering is handled by the caller; confirm against
 * the bnxt doorbell helpers.
 */
#define B_RX_DB(db, prod)                                               \
		(*(uint32_t *)db = (DB_KEY_RX | (prod)))
11
/*
 * Extract the TPA L4 payload size hint from the hdr_info word:
 * bits 31:27 hold the size when any of them is set, otherwise a
 * default of 32 is used.
 *
 * Fix: the original body was a plain brace compound statement
 * ("{ ... }"), which is not an expression and therefore could not
 * compile anywhere a value is expected (and the ternary lacked a
 * terminating semicolon). Rewritten as a GNU statement expression,
 * keeping the single-evaluation property of the "x" argument.
 */
#define BNXT_TPA_L4_SIZE(x)     \
	({ \
		__typeof__(x) hdr_info = (x); \
		(hdr_info & 0xf8000000) ? (hdr_info >> 27) : 32; \
	})
17
/* Inner (tunneled) L3 header offset: bits 26:18 of hdr_info. */
#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
	(((hdr_info) >> 18) & 0x1ff)

/* Inner (tunneled) L2 header offset: bits 17:9 of hdr_info. */
#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
	(((hdr_info) >> 9) & 0x1ff)

/* Outer L3 header offset: bits 8:0 of hdr_info. */
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
	((hdr_info) & 0x1ff)

/*
 * Low 4 bits of the Rx completion "flags2" word — the checksum
 * "calculated" flags tested by the IS_*_PKT macros below.
 */
#define flags2_0xf(rxcmp1)      \
	(((rxcmp1)->flags2) & 0xf)
29
/* IP non-tunnel packets may come with or without L4:
 * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP, or
 * Ether / (vlan) / outer IP|IP6 / ICMP.
 * We use '==' instead of '&' because tunnel packets have all four
 * checksum-calculated bits of flags2 set.
 */
#define IS_IP_NONTUNNEL_PKT(flags2_f)   \
	(       \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \
	)
43
/* An IP tunnel packet must have at least the tunnel-IP-calc flag set.
 * The tunnel (i.e. outer) L4 checksum is optional, because of e.g.:
 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 * Also, inner L3 checksum errors are not taken into consideration by DPDK.
 */
#define IS_IP_TUNNEL_PKT(flags2_f)      \
	((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))
59
/* RX_PKT_CMPL_ERRORS_IP_CS_ERROR only applies to non-tunnel pkts.
 * For tunnel pkts RX_PKT_CMPL_ERRORS_IP_CS_ERROR is not accounted and
 * the pkt is treated as having a good checksum.
 */
#define RX_CMP_IP_CS_ERROR(rxcmp1)      \
	((rxcmp1)->errors_v2 &  \
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR))

/* Outer (tunnel) IP checksum error reported in the completion. */
#define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1)        \
	((rxcmp1)->errors_v2 &  \
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))

/* Inner or outer IP checksum was calculated by hardware. */
#define RX_CMP_IP_CS_BITS       \
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)

/*
 * True when hardware calculated no IP checksum at all, i.e. the IP
 * checksum status is unknown.
 * Fix: the full expansion is now parenthesized so the macro composes
 * safely in any expression context (macro hygiene, CERT PRE02-C).
 */
#define RX_CMP_IP_CS_UNKNOWN(rxcmp1)    \
		(!((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS))
78
/* L4 non-tunnel packet, e.g.
 * Ether / (vlan) / IP6 / UDP|TCP|SCTP:
 * exactly the IP and L4 checksum-calculated flags, no tunnel flags.
 */
#define IS_L4_NONTUNNEL_PKT(flags2_f)   \
	( \
	  ((flags2_f) == \
	   (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |    \
			     RX_PKT_CMPL_FLAGS2_L4_CS_CALC))))
87
/* L4 tunnel packet -
 * the outer L4 checksum is not mandatory, e.g. GRE:
 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 */
/* All four checksum-calculated flags set: inner + outer, IP + L4. */
#define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f)    \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |     \
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC |     \
			    RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |   \
			    RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)))

/* Tunnel packet where only the inner L4 checksum was calculated
 * (no outer L4, e.g. GRE).
 */
#define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f)     \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |     \
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC |     \
			    RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)))

/* Any L4 tunnel packet: with or without the outer L4 checksum. */
#define IS_L4_TUNNEL_PKT(flags2_f)      \
	(       \
		IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \
		IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f)     \
	)
112
/* Inner L4 checksum was calculated by hardware. */
#define RX_CMP_L4_CS_BITS       \
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)

/*
 * True when no inner L4 checksum was calculated (status unknown).
 * Fix: the full expansion is now parenthesized so the macro composes
 * safely in any expression context (macro hygiene, CERT PRE02-C).
 */
#define RX_CMP_L4_CS_UNKNOWN(rxcmp1)                                    \
	    (!((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS))

/* Outer (tunnel) L4 checksum was calculated by hardware. */
#define RX_CMP_T_L4_CS_BITS     \
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)

/* True when no outer L4 checksum was calculated (status unknown).
 * Fix: expansion parenthesized, as above.
 */
#define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1)                                  \
	    (!((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS))

/* Outer L4 chksum error reported in the completion's errors_v2. */
#define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \
	 ((rxcmp1)->errors_v2 & \
	  rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))

/* Inner L4 chksum error reported in the completion's errors_v2. */
#define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \
	 ((rxcmp1)->errors_v2 & \
	  rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
136
/* Batch threshold for re-posting Rx buffers to the ring —
 * NOTE(review): presumably doorbell/replenish batching; confirm
 * against the usage in bnxt_rxr.c.
 */
#define BNXT_RX_POST_THRESH     32
138
/*
 * Rx packet hash input type, recorded per TPA aggregation
 * (see bnxt_tpa_info::hash_type below).
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,     /* Undefined type */
	PKT_HASH_TYPE_L2,       /* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,       /* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,       /* Input: src_IP, dst_IP, src_port, dst_port */
};
145
/*
 * Per-aggregation TPA state, captured at TPA-start and applied to the
 * mbuf when the aggregation completes.
 */
struct bnxt_tpa_info {
	struct rte_mbuf		*mbuf;		/* head mbuf of the aggregation */
	uint16_t			len;
	unsigned short		gso_type;
	uint32_t			flags2;		/* flags2 word from the completion */
	uint32_t			metadata;
	enum pkt_hash_types	hash_type;	/* RSS hash input type */
	uint32_t			rss_hash;
	uint32_t			hdr_info;	/* packed header offsets/size, see
						 * BNXT_TPA_*_OFF/BNXT_TPA_L4_SIZE
						 */
};
156
/*
 * Software Rx buffer descriptor: tracks the mbuf backing one hardware
 * Rx descriptor.
 */
struct bnxt_sw_rx_bd {
	struct rte_mbuf		*mbuf; /* data associated with RX descriptor */
};
160
/*
 * Per-queue Rx ring state: a packet ring plus an aggregation ("ag")
 * ring, each with hardware descriptors, a software mbuf ring and a
 * doorbell.
 */
struct bnxt_rx_ring_info {
	uint16_t		rx_prod;	/* Rx ring producer index */
	uint16_t		ag_prod;	/* ag ring producer index */
	struct bnxt_db_info     rx_db;		/* Rx ring doorbell */
	struct bnxt_db_info     ag_db;		/* ag ring doorbell */

	struct rx_prod_pkt_bd	*rx_desc_ring;	/* Rx hardware descriptors */
	struct rx_prod_pkt_bd	*ag_desc_ring;	/* ag hardware descriptors */
	struct bnxt_sw_rx_bd	*rx_buf_ring; /* sw ring */
	struct bnxt_sw_rx_bd	*ag_buf_ring; /* sw ring */

	rte_iova_t		rx_desc_mapping;	/* IOVA of rx_desc_ring */
	rte_iova_t		ag_desc_mapping;	/* IOVA of ag_desc_ring */

	struct bnxt_ring	*rx_ring_struct;
	struct bnxt_ring	*ag_ring_struct;

	/*
	 * To deal with out of order return from TPA, use free buffer indicator
	 */
	struct rte_bitmap	*ag_bitmap;

	struct bnxt_tpa_info *tpa_info;	/* per-aggregation TPA state */
};
185
/* Rx burst handler: receive up to nb_pkts mbufs from an Rx queue. */
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts);
/* Substitute Rx burst handler — presumably installed while the device
 * is in reset so the hardware is never accessed (see commit subject);
 * confirm against the implementation in bnxt_rxr.c.
 */
uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);
/* Release the Rx rings of all queues on the port. */
void bnxt_free_rx_rings(struct bnxt *bp);
/* Allocate/initialize the ring structures for one Rx queue on the
 * given NUMA socket.
 */
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
/* Populate one Rx queue's rings so it is ready to receive. */
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
/* ethdev Rx queue start/stop hooks. */
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

#ifdef RTE_ARCH_X86
/* Vector Rx burst path (x86 only, per the RTE_ARCH_X86 guard). */
uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
/* Extra per-queue setup required by the vector Rx path. */
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif
201
202 #endif