/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */
8 #include "hsi_struct_def_dpdk.h"
/* Ring the RX doorbell: store the producer index, tagged with the RX
 * doorbell key, through the ring's doorbell address.
 * Fix: parenthesize the "db" argument as well (macro-expansion safety).
 */
#define B_RX_DB(db, prod) \
	(*(uint32_t *)(db) = (DB_KEY_RX | (prod)))
/* TPA aggregated segment size: bits 31:27 of hdr_info when non-zero,
 * otherwise a default of 32.
 * NOTE(review): the wrapper lines were truncated in this extract; restored
 * as a GNU statement expression (so it is usable as an expression and the
 * argument is evaluated exactly once). __typeof__ is used instead of
 * typeof for strict-ISO compilation modes. Confirm against upstream.
 */
#define BNXT_TPA_L4_SIZE(x) \
	({ \
		__typeof__(x) hdr_info = (x); \
		(((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32); \
	})
/* TPA hdr_info decoding: header offsets packed into one 32-bit word. */

/* Inner L3 header offset: bits 26:18. */
#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
	(((hdr_info) >> 18) & 0x1ff)

/* Inner L2 header offset: bits 17:9. */
#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
	(((hdr_info) >> 9) & 0x1ff)

/* Outer L3 header offset: bits 8:0.
 * NOTE(review): body line was truncated in this extract; restored to match
 * the 9-bit field layout of the two macros above — confirm against upstream.
 */
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
	((hdr_info) & 0x1ff)
/* Low nibble of the completion's flags2 word (the four checksum-calc
 * indication bits used by the IS_*_PKT classifiers below).
 */
#define flags2_0xf(rxcmp1) \
	(((rxcmp1)->flags2) & 0xf)
/* IP non-tunnel packets can be with or without L4:
 * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP, or
 * Ether / (vlan) / outer IP|IP6 / ICMP.
 * We use '==' instead of '&' because tunnel packets have all 4 fields set.
 */
/* True when flags2 is exactly IP_CS_CALC, optionally with L4_CS_CALC —
 * the exact comparison excludes tunnel packets, which also carry the
 * T_* bits.
 * NOTE(review): the two "((flags2_f) ==" continuation lines were truncated
 * in this extract and have been restored (paren balance of the surviving
 * lines confirms the shape) — verify against upstream.
 */
#define IS_IP_NONTUNNEL_PKT(flags2_f) \
	( \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \
	)
/* An IP tunnel packet must have at least tunnel-IP-calc set.
 * The tunnel (i.e. outer) L4 is optional because of:
 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
 *           UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 * Also, inner L3 checksum errors are not taken into consideration by DPDK.
 */
/* Nonzero when the tunnel (outer) IP checksum-calculated bit is set. */
#define IS_IP_TUNNEL_PKT(flags2_f) \
	((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))
/* RX_PKT_CMPL_ERRORS_IP_CS_ERROR applies only to non-tunnel packets.
 * For tunnel packets RX_PKT_CMPL_ERRORS_IP_CS_ERROR is not accounted and
 * the outer IP error is reported via RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR.
 */
/* Nonzero when the completion reports an IP checksum error
 * (non-tunnel packets).
 */
#define RX_CMP_IP_CS_ERROR(rxcmp1) \
	((rxcmp1)->errors_v2 & \
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR))

/* Nonzero when the completion reports an outer (tunnel) IP checksum
 * error.
 */
#define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) \
	((rxcmp1)->errors_v2 & \
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))
/* Flags2 bits meaning the HW computed an IP checksum (inner or outer). */
#define RX_CMP_IP_CS_BITS \
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)

/* True when the HW computed no IP checksum for this completion, i.e. the
 * IP checksum status is unknown.
 */
#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
	!((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
/* L4 non-tunnel packet:
 * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP
 */
/* True when flags2 is exactly IP_CS_CALC | L4_CS_CALC (both inner
 * checksums computed, no tunnel bits).
 * NOTE(review): the "(" and "((flags2_f) ==" continuation lines were
 * truncated in this extract and restored (paren balance of the surviving
 * lines confirms the shape) — verify against upstream.
 */
#define IS_L4_NONTUNNEL_PKT(flags2_f) \
	( \
	 ((flags2_f) == \
	  (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			    RX_PKT_CMPL_FLAGS2_L4_CS_CALC))))
/* L4 tunnel packet:
 * Outer L4 is not mandatory, e.g. GRE:
 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 */
/* True when all four checksum-calc bits are set: inner and outer IP and
 * L4 checksums were all computed.
 * NOTE(review): the "((flags2_f) ==" continuation line was truncated in
 * this extract and restored (paren balance confirms) — verify upstream.
 */
#define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) \
	((flags2_f) == \
	 (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			   RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
			   RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC | \
			   RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)))
/* True when inner IP+L4 and outer IP checksums were computed, but the
 * outer L4 was not (e.g. GRE tunnels with no outer L4 header).
 * NOTE(review): the "((flags2_f) ==" continuation line was truncated in
 * this extract and restored (paren balance confirms) — verify upstream.
 */
#define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
	((flags2_f) == \
	 (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
			   RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
			   RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)))
/* True for either tunnel L4 case: with or without the outer L4 checksum.
 * NOTE(review): the surrounding "(" / ")" wrapper lines were truncated in
 * this extract and restored — verify against upstream.
 */
#define IS_L4_TUNNEL_PKT(flags2_f) \
	( \
	 IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \
	 IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
	)
/* TPA-start aggregation ID, pre-Thor chips: the ID is a masked/shifted
 * field inside agg_id.
 */
#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
	((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_START_CMPL_AGG_ID_SFT)

/* TPA-start aggregation ID, Thor chips: the full 16-bit agg_id field. */
#define BNXT_TPA_START_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)
/* Return the aggregation ID carried by a TPA-start completion.
 * Thor-based NICs use the full 16-bit agg_id field; earlier chips pack
 * the ID into a masked/shifted portion of it.
 * NOTE(review): the function's braces were truncated in this extract and
 * have been restored.
 */
static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
					     struct rx_tpa_start_cmpl *cmp)
{
	if (BNXT_CHIP_THOR(bp))
		return BNXT_TPA_START_AGG_ID_TH(cmp);

	return BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
}
/* Number of aggregation buffers in a TPA-end completion, pre-Thor:
 * masked/shifted field of agg_bufs_v1.
 */
#define BNXT_TPA_END_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \
	 >> RX_TPA_END_CMPL_AGG_BUFS_SFT)

/* Number of aggregation buffers, Thor: dedicated tpa_agg_bufs field. */
#define BNXT_TPA_END_AGG_BUFS_TH(cmp) \
	((cmp)->tpa_agg_bufs)

/* TPA-end aggregation ID, pre-Thor: masked/shifted field of agg_id. */
#define BNXT_TPA_END_AGG_ID(cmp) \
	(((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_END_CMPL_AGG_ID_SFT)

/* TPA-end aggregation ID, Thor: the full 16-bit agg_id field. */
#define BNXT_TPA_END_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)
/* Flags2 bit meaning the HW computed the (inner) L4 checksum. */
#define RX_CMP_L4_CS_BITS \
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)

/* True when the inner L4 checksum status is unknown (not computed). */
#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
	!((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)

/* Flags2 bit meaning the HW computed the outer (tunnel) L4 checksum. */
#define RX_CMP_T_L4_CS_BITS \
	rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)

/* True when the outer L4 checksum status is unknown (not computed). */
#define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1) \
	!((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS)
/* Outer L4 checksum error. */
/* Nonzero when errors_v2 carries the outer (tunnel) L4 checksum error bit. */
#define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \
	((rxcmp1)->errors_v2 & \
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))
/* Inner L4 checksum error. */
/* Nonzero when errors_v2 carries the inner L4 checksum error bit. */
#define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \
	((rxcmp1)->errors_v2 & \
	 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
168 #define BNXT_RX_POST_THRESH 32
/* Hash input type reported for a received packet (same ordering as the
 * Linux kernel's skb hash types).
 * NOTE(review): the closing "};" was truncated in this extract and has
 * been restored.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};
/* Per-aggregation TPA (LRO) reassembly context.
 * NOTE(review): this extract is missing lines of the struct (original
 * lines between the fields below and the closing brace) — confirm the
 * complete layout against the full header before relying on it.
 */
177 struct bnxt_tpa_info {
/* Head mbuf of the in-progress aggregated packet. */
178 struct rte_mbuf *mbuf;
/* Aggregation-buffer completions collected per segment (TPA v2). */
181 struct rx_tpa_v2_abuf_cmpl agg_arr[TPA_MAX_NUM_SEGS];
/* Software shadow of one RX buffer descriptor.
 * NOTE(review): the closing "};" was truncated in this extract and has
 * been restored.
 */
struct bnxt_sw_rx_bd {
	struct rte_mbuf		*mbuf; /* data associated with RX descriptor */
};
/* Per-queue RX ring state: the RX descriptor ring plus the aggregation
 * (AGG) ring used for jumbo/TPA buffers.
 * NOTE(review): this extract is missing several lines of the struct
 * (producer-index fields, comment delimiters, and the closing brace) —
 * confirm the complete layout against the full header.
 */
188 struct bnxt_rx_ring_info {
/* Doorbell registers for the RX and AGG rings. */
191 struct bnxt_db_info rx_db;
192 struct bnxt_db_info ag_db;
/* Hardware descriptor rings. */
194 struct rx_prod_pkt_bd *rx_desc_ring;
195 struct rx_prod_pkt_bd *ag_desc_ring;
196 struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
197 struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */
/* IOVA (bus) addresses of the two descriptor rings. */
199 rte_iova_t rx_desc_mapping;
200 rte_iova_t ag_desc_mapping;
/* Generic ring bookkeeping for the RX and AGG rings. */
202 struct bnxt_ring *rx_ring_struct;
203 struct bnxt_ring *ag_ring_struct;
/* NOTE(review): the comment delimiters around the line below were lost
 * in this extract; the text documents ag_bitmap.
 */
206 * To deal with out of order return from TPA, use free buffer indicator
208 struct rte_bitmap *ag_bitmap;
/* Per-aggregation TPA contexts for this queue. */
210 struct bnxt_tpa_info *tpa_info;
/* Scalar RX burst handler. */
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
/* No-op RX burst handler installed while the port/queue is stopped. */
uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);
/* NOTE(review): the "uint16_t nb_pkts);" continuation lines above were
 * truncated in this extract and restored per the standard rte_eth rx-burst
 * signature — confirm against upstream.
 */
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* Vectorized RX burst handler and its queue setup hook.
 * NOTE(review): the continuation line was truncated in this extract and
 * restored per the rte_eth rx-burst signature; the surrounding
 * architecture #ifdef (if any) is not visible here — confirm upstream.
 */
uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
/* Populate the mbuf's flow-mark metadata from the RX completion.
 * NOTE(review): behavior inferred from name/signature only — the
 * implementation lives in bnxt_rxr.c; confirm there.
 */
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf);
/* Layout constants for decoding the CFA code / metadata format carried in
 * RX completions (used when translating HW metadata into an mbuf mark).
 */
#define BNXT_RX_META_CFA_CODE_SHIFT		19
#define BNXT_CFA_CODE_META_SHIFT		16
#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT	0x8000000
#define BNXT_RX_META_CFA_CODE_EEM_BIT		0x4000000
#define BNXT_CFA_META_FMT_MASK			0x70
#define BNXT_CFA_META_FMT_SHFT			4
#define BNXT_CFA_META_FMT_EM_EEM_SHFT		1
#define BNXT_CFA_META_FMT_EEM			3