/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2018 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
6 #ifndef _ENIC_RXTX_COMMON_H_
7 #define _ENIC_RXTX_COMMON_H_
10 enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
12 return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
15 static inline uint16_t
16 enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
18 return le16_to_cpu(crd->bytes_written_flags) &
19 ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
23 enic_cq_rx_desc_packet_error(uint16_t bwflags)
25 return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
26 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
30 enic_cq_rx_desc_eop(uint16_t ciflags)
32 return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
33 == CQ_ENET_RQ_DESC_FLAGS_EOP;
37 enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
39 return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
40 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
41 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
45 enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
47 return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
48 CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
52 enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
54 return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
55 CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
59 enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
61 return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
62 CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
65 static inline uint32_t
66 enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
68 return le32_to_cpu(cqrd->rss_hash);
71 static inline uint16_t
72 enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
74 return le16_to_cpu(cqrd->vlan);
77 static inline uint16_t
78 enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
80 struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
81 return le16_to_cpu(cqrd->bytes_written_flags) &
82 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
/*
 * Check an Rx completion descriptor for packet errors.
 * Returns 1 if the packet was truncated (error), 0 otherwise.
 */
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}
98 /* Lookup table to translate RX CQ flags to mbuf flags. */
100 enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
102 struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
103 uint8_t cqrd_flags = cqrd->flags;
105 * Odd-numbered entries are for tunnel packets. All packet type info
106 * applies to the inner packet, and there is no info on the outer
107 * packet. The outer flags in these entries exist only to avoid
108 * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
111 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
112 * RTE_PTYPE_TUNNEL_GRENAT..
114 static const uint32_t cq_type_table[128] __rte_cache_aligned = {
115 [0x00] = RTE_PTYPE_UNKNOWN,
116 [0x01] = RTE_PTYPE_UNKNOWN |
117 RTE_PTYPE_TUNNEL_GRENAT |
118 RTE_PTYPE_INNER_L2_ETHER,
119 [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
120 [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
121 RTE_PTYPE_TUNNEL_GRENAT |
122 RTE_PTYPE_INNER_L2_ETHER |
123 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
124 RTE_PTYPE_INNER_L4_NONFRAG,
125 [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
126 [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
127 RTE_PTYPE_TUNNEL_GRENAT |
128 RTE_PTYPE_INNER_L2_ETHER |
129 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
130 RTE_PTYPE_INNER_L4_UDP,
131 [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
132 [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
133 RTE_PTYPE_TUNNEL_GRENAT |
134 RTE_PTYPE_INNER_L2_ETHER |
135 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
136 RTE_PTYPE_INNER_L4_TCP,
137 [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
138 [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
139 RTE_PTYPE_TUNNEL_GRENAT |
140 RTE_PTYPE_INNER_L2_ETHER |
141 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
142 RTE_PTYPE_INNER_L4_FRAG,
143 [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
144 [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
145 RTE_PTYPE_TUNNEL_GRENAT |
146 RTE_PTYPE_INNER_L2_ETHER |
147 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
148 RTE_PTYPE_INNER_L4_FRAG,
149 [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
150 [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
151 RTE_PTYPE_TUNNEL_GRENAT |
152 RTE_PTYPE_INNER_L2_ETHER |
153 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
154 RTE_PTYPE_INNER_L4_FRAG,
155 [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
156 [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
157 RTE_PTYPE_TUNNEL_GRENAT |
158 RTE_PTYPE_INNER_L2_ETHER |
159 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
160 RTE_PTYPE_INNER_L4_NONFRAG,
161 [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
162 [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
163 RTE_PTYPE_TUNNEL_GRENAT |
164 RTE_PTYPE_INNER_L2_ETHER |
165 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
166 RTE_PTYPE_INNER_L4_UDP,
167 [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
168 [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
169 RTE_PTYPE_TUNNEL_GRENAT |
170 RTE_PTYPE_INNER_L2_ETHER |
171 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
172 RTE_PTYPE_INNER_L4_TCP,
173 [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
174 [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
175 RTE_PTYPE_TUNNEL_GRENAT |
176 RTE_PTYPE_INNER_L2_ETHER |
177 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
178 RTE_PTYPE_INNER_L4_FRAG,
179 [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
180 [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
181 RTE_PTYPE_TUNNEL_GRENAT |
182 RTE_PTYPE_INNER_L2_ETHER |
183 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
184 RTE_PTYPE_INNER_L4_FRAG,
185 [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
186 [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
187 RTE_PTYPE_TUNNEL_GRENAT |
188 RTE_PTYPE_INNER_L2_ETHER |
189 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
190 RTE_PTYPE_INNER_L4_FRAG,
191 /* All others reserved */
193 cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
194 | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
195 | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
196 return cq_type_table[cqrd_flags + tnl];
200 enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
202 struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
203 uint16_t bwflags, pkt_flags = 0, vlan_tci;
204 bwflags = enic_cq_rx_desc_bwflags(cqrd);
205 vlan_tci = enic_cq_rx_desc_vlan(cqrd);
207 /* VLAN STRIPPED flag. The L2 packet type updated here also */
208 if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
209 pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
210 mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
213 pkt_flags |= PKT_RX_VLAN;
214 mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
216 mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
219 mbuf->vlan_tci = vlan_tci;
221 if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
222 struct cq_enet_rq_clsf_desc *clsf_cqd;
224 clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
225 filter_id = clsf_cqd->filter_id;
227 pkt_flags |= PKT_RX_FDIR;
228 if (filter_id != ENIC_MAGIC_FILTER_ID) {
229 /* filter_id = mark id + 1, so subtract 1 */
230 mbuf->hash.fdir.hi = filter_id - 1;
231 pkt_flags |= PKT_RX_FDIR_ID;
234 } else if (enic_cq_rx_desc_rss_type(cqrd)) {
236 pkt_flags |= PKT_RX_RSS_HASH;
237 mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
241 if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
242 if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
244 l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
247 * When overlay offload is enabled, the NIC may
248 * set ipv4_csum_ok=1 if the inner packet is IPv6..
249 * So, explicitly check for IPv4 before checking
252 if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
253 if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
254 pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
256 pkt_flags |= PKT_RX_IP_CKSUM_BAD;
259 if (l4_flags == RTE_PTYPE_L4_UDP ||
260 l4_flags == RTE_PTYPE_L4_TCP) {
261 if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
262 pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
264 pkt_flags |= PKT_RX_L4_CKSUM_BAD;
269 mbuf->ol_flags = pkt_flags;
272 #endif /* _ENIC_RXTX_COMMON_H_ */