1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
/*
 * Ring the RX doorbell: store the producer index 'prod', tagged with the
 * RX doorbell key, into the 32-bit doorbell location pointed to by 'db'.
 * Fix: parenthesize the 'db' macro argument so an expression argument
 * (e.g. base + offset) is evaluated before the cast/dereference rather
 * than being mangled by operator precedence (CERT PRE01-C).
 * NOTE(review): presumably 'db' is a device doorbell register; there is
 * no volatile qualifier or write barrier here — confirm ordering is
 * handled by the callers.
 */
9 #define B_RX_DB(db, prod) \
10 (*(uint32_t *)(db) = (DB_KEY_RX | (prod)))
/*
 * Extract the aggregated (TPA) packet's L4 header size from the TPA
 * 'hdr_info' word: bits 31:27 when that field is non-zero, otherwise
 * default to 32.
 * NOTE(review): the statement-expression delimiters of this macro are
 * not visible in this excerpt — the body appears truncated.
 */
12 #define BNXT_TPA_L4_SIZE(x) \
14 typeof(x) hdr_info = (x); \
15 (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) \
/* Inner L3 header offset: 9-bit field in bits 26:18 of hdr_info. */
18 #define BNXT_TPA_INNER_L3_OFF(hdr_info) \
19 (((hdr_info) >> 18) & 0x1ff)

/* Inner L2 header offset: 9-bit field in bits 17:9 of hdr_info. */
21 #define BNXT_TPA_INNER_L2_OFF(hdr_info) \
22 (((hdr_info) >> 9) & 0x1ff)

/* Outer L3 header offset — presumably the low bits of hdr_info;
 * NOTE(review): the macro body is not visible in this excerpt.
 */
24 #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
/* Low four bits of the RX completion's flags2 word (the checksum-calc
 * indication bits tested by the IS_*_PKT helpers below).
 */
27 #define flags2_0xf(rxcmp1) \
28 ((rxcmp1)->flags2 & 0xf)
30 /* IP non tunnel can be with or without L4-
31 * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP Or
32 * Ether / (vlan) / outer IP|IP6 / ICMP
33 * we use '==' instead of '&' because tunnel pkts have all 4 fields set.
/*
 * True only for non-tunnel IP packets: flags2 must equal exactly
 * IP_CS_CALC (IP without L4) or IP_CS_CALC|L4_CS_CALC (IP with L4).
 * Exact '==' comparison (rather than '&') rejects tunnel packets, which
 * also carry the T_* bits (see the comment above).
 * NOTE(review): some continuation lines of this macro are not visible
 * in this excerpt.
 */
35 #define IS_IP_NONTUNNEL_PKT(flags2_f) \
38 (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \
40 (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
41 RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \
44 /* IP Tunnel pkt must have at least tunnel-IP-calc set.
45 * again the tunnel (i.e. outer) L4 is optional because of
46 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
47 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
49 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
51 * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
53 * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
54 * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
55 * also inner L3 chksum error is not taken into consideration by DPDK.
/* Nonzero when the tunnel (outer) IP checksum was calculated by the
 * hardware, i.e. T_IP_CS_CALC is set in flags2 — the minimum indication
 * for a tunnel packet per the comment above.
 */
57 #define IS_IP_TUNNEL_PKT(flags2_f) \
58 ((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))
60 /* RX_PKT_CMPL_ERRORS_IP_CS_ERROR only for Non-tunnel pkts.
61 * For tunnel pkts RX_PKT_CMPL_ERRORS_IP_CS_ERROR is not accounted and treated
/* Nonzero when the IP checksum error bit is set in the completion's
 * errors_v2 word — applies to non-tunnel packets only, per the note
 * above.
 */
64 #define RX_CMP_IP_CS_ERROR(rxcmp1) \
65 ((rxcmp1)->errors_v2 & \
66 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR))

/* Nonzero when the tunnel (outer) IP checksum error bit is set in
 * errors_v2.
 */
68 #define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) \
69 ((rxcmp1)->errors_v2 & \
70 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))
/* flags2 mask covering both the inner and the tunnel (outer) IP
 * checksum-calculated bits.
 */
72 #define RX_CMP_IP_CS_BITS \
73 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
74 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)

/* True when hardware computed no IP checksum at all (neither CS_CALC
 * bit set) — the IP checksum status is unknown.
 */
76 #define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
77 !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
80 * Ether / (vlan) / IP6 / UDP|TCP|SCTP
/*
 * Non-tunnel packet carrying an L4 header: flags2 must match
 * IP_CS_CALC|L4_CS_CALC exactly (tunnel packets would also set T_* bits).
 * NOTE(review): the macro's opening comparison line is not visible in
 * this excerpt.
 */
82 #define IS_L4_NONTUNNEL_PKT(flags2_f) \
85 (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
86 RX_PKT_CMPL_FLAGS2_L4_CS_CALC))))
89 * Outer L4 is not mandatory. Eg: GRE-
90 * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
91 * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
/*
 * Tunnel packet with both inner and outer L4 checksums calculated:
 * all four CS_CALC bits (inner IP/L4 and tunnel IP/L4) present.
 * NOTE(review): the opening comparison line of this macro is not
 * visible in this excerpt.
 */
94 #define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) \
96 (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
97 RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
98 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC | \
99 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)))

/*
 * Tunnel packet where only the inner L4 checksum was calculated (no
 * outer L4 — e.g. GRE encapsulation, per the comment above).
 * NOTE(review): the opening comparison line is not visible here.
 */
101 #define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
103 (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
104 RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
105 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)))

/*
 * Tunnel packet with an L4 header: either of the two cases above.
 * NOTE(review): the surrounding parenthesis lines of this macro are not
 * visible in this excerpt.
 */
107 #define IS_L4_TUNNEL_PKT(flags2_f) \
109 IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \
110 IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
/* flags2 mask: inner L4 checksum-calculated bit. */
113 #define RX_CMP_L4_CS_BITS \
114 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)

/* True when hardware did not compute the inner L4 checksum — status
 * unknown.
 */
116 #define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
117 !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)

/* flags2 mask: tunnel (outer) L4 checksum-calculated bit. */
119 #define RX_CMP_T_L4_CS_BITS \
120 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)

/* True when hardware did not compute the tunnel (outer) L4 checksum —
 * status unknown.
 */
122 #define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1) \
123 !((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS)
125 /* Outer L4 chksum error
/* Nonzero when the tunnel (outer) L4 checksum error bit is set in the
 * completion's errors_v2 word.
 */
127 #define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \
128 ((rxcmp1)->errors_v2 & \
129 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))

131 /* Inner L4 chksum error
/* Nonzero when the inner L4 checksum error bit is set in errors_v2. */
133 #define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \
134 ((rxcmp1)->errors_v2 & \
135 rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
/* Batch threshold for posting RX buffers back to the hardware ring —
 * NOTE(review): presumably buffers are replenished once this many have
 * been consumed; confirm against the ring-handling code.
 */
137 #define BNXT_RX_POST_THRESH 32
/* Scope of the packet fields covered by the reported RX hash.
 * NOTE(review): the enum's closing brace is not visible in this excerpt.
 */
139 enum pkt_hash_types {
140 PKT_HASH_TYPE_NONE, /* Undefined type */
141 PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
142 PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
143 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
/* Per-aggregation state for an in-progress TPA (LRO) reassembly.
 * NOTE(review): additional members and the closing brace are not
 * visible in this excerpt.
 */
146 struct bnxt_tpa_info {
/* Head mbuf accumulating the aggregated packet. */
147 struct rte_mbuf *mbuf;
/* GSO type to report for the aggregated packet. */
149 unsigned short gso_type;
/* RSS hash coverage reported by hardware (see enum pkt_hash_types). */
152 enum pkt_hash_types hash_type;
/* Software ring entry: tracks the mbuf attached to one RX descriptor.
 * NOTE(review): the closing brace is not visible in this excerpt.
 */
157 struct bnxt_sw_rx_bd {
158 struct rte_mbuf *mbuf; /* data associated with RX descriptor */
/* Per-queue RX ring state: descriptor/software rings for both the main
 * RX ring and the aggregation (ag) ring, their doorbells and DMA
 * mappings, plus TPA bookkeeping.
 * NOTE(review): some members and the closing brace are not visible in
 * this excerpt.
 */
161 struct bnxt_rx_ring_info {
/* Doorbells for the RX and aggregation rings. */
164 struct bnxt_db_info rx_db;
165 struct bnxt_db_info ag_db;
/* Hardware descriptor rings. */
167 struct rx_prod_pkt_bd *rx_desc_ring;
168 struct rx_prod_pkt_bd *ag_desc_ring;
169 struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
170 struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */
/* IOVA (DMA) addresses of the descriptor rings. */
172 rte_iova_t rx_desc_mapping;
173 rte_iova_t ag_desc_mapping;
/* Generic ring bookkeeping for the two rings. */
175 struct bnxt_ring *rx_ring_struct;
176 struct bnxt_ring *ag_ring_struct;
179 * To deal with out of order return from TPA, use free buffer indicator
181 struct rte_bitmap *ag_bitmap;
/* Per-aggregation TPA reassembly state (array; size not visible here). */
183 struct bnxt_tpa_info *tpa_info;
/* Public RX-path entry points.
 * NOTE(review): several prototype continuation lines (trailing
 * parameters) are not visible in this excerpt.
 */

/* Scalar burst receive handler for one RX queue. */
186 uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* NOTE(review): presumably a no-op receive handler installed while the
 * port/queue is not ready — confirm against the driver source.
 */
188 uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Release all RX rings owned by the device. */
190 void bnxt_free_rx_rings(struct bnxt *bp);
/* Allocate/initialize ring bookkeeping for one RX queue on 'socket_id';
 * returns 0 on success.
 */
191 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
/* Populate one RX ring with buffers; returns 0 on success. */
192 int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
/* ethdev queue start/stop hooks; return 0 on success. */
193 int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
194 int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* Vectorized (SIMD) receive handler and its per-queue setup. */
197 uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
199 int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);