/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _ENIC_RXTX_COMMON_H_
#define _ENIC_RXTX_COMMON_H_

#include <rte_byteorder.h>
11 static inline uint16_t
12 enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
14 return rte_le_to_cpu_16(crd->completed_index_flags) &
15 ~CQ_DESC_COMP_NDX_MASK;
18 static inline uint16_t
19 enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
21 return rte_le_to_cpu_16(crd->bytes_written_flags) &
22 ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
26 enic_cq_rx_desc_packet_error(uint16_t bwflags)
28 return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
29 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
33 enic_cq_rx_desc_eop(uint16_t ciflags)
35 return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
36 == CQ_ENET_RQ_DESC_FLAGS_EOP;
40 enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
42 return (rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) &
43 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
44 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
48 enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
50 return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
51 CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
55 enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
57 return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
58 CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
62 enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
64 return (uint8_t)((rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) >>
65 CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
68 static inline uint32_t
69 enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
71 return rte_le_to_cpu_32(cqrd->rss_hash);
74 static inline uint16_t
75 enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
77 return rte_le_to_cpu_16(cqrd->vlan);
80 static inline uint16_t
81 enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
83 struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
84 return rte_le_to_cpu_16(cqrd->bytes_written_flags) &
85 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
/*
 * Check a completion descriptor for receive errors.
 * Returns 1 if the packet was reported truncated/errored, 0 otherwise.
 */
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}
101 /* Lookup table to translate RX CQ flags to mbuf flags. */
103 enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
105 struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
106 uint8_t cqrd_flags = cqrd->flags;
108 * Odd-numbered entries are for tunnel packets. All packet type info
109 * applies to the inner packet, and there is no info on the outer
110 * packet. The outer flags in these entries exist only to avoid
111 * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
114 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
115 * RTE_PTYPE_TUNNEL_GRENAT..
117 static const uint32_t cq_type_table[128] __rte_cache_aligned = {
118 [0x00] = RTE_PTYPE_UNKNOWN,
119 [0x01] = RTE_PTYPE_UNKNOWN |
120 RTE_PTYPE_TUNNEL_GRENAT |
121 RTE_PTYPE_INNER_L2_ETHER,
122 [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
123 [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
124 RTE_PTYPE_TUNNEL_GRENAT |
125 RTE_PTYPE_INNER_L2_ETHER |
126 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
127 RTE_PTYPE_INNER_L4_NONFRAG,
128 [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
129 [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
130 RTE_PTYPE_TUNNEL_GRENAT |
131 RTE_PTYPE_INNER_L2_ETHER |
132 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
133 RTE_PTYPE_INNER_L4_UDP,
134 [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
135 [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
136 RTE_PTYPE_TUNNEL_GRENAT |
137 RTE_PTYPE_INNER_L2_ETHER |
138 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
139 RTE_PTYPE_INNER_L4_TCP,
140 [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
141 [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
142 RTE_PTYPE_TUNNEL_GRENAT |
143 RTE_PTYPE_INNER_L2_ETHER |
144 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
145 RTE_PTYPE_INNER_L4_FRAG,
146 [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
147 [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
148 RTE_PTYPE_TUNNEL_GRENAT |
149 RTE_PTYPE_INNER_L2_ETHER |
150 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
151 RTE_PTYPE_INNER_L4_FRAG,
152 [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
153 [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
154 RTE_PTYPE_TUNNEL_GRENAT |
155 RTE_PTYPE_INNER_L2_ETHER |
156 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
157 RTE_PTYPE_INNER_L4_FRAG,
158 [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
159 [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
160 RTE_PTYPE_TUNNEL_GRENAT |
161 RTE_PTYPE_INNER_L2_ETHER |
162 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
163 RTE_PTYPE_INNER_L4_NONFRAG,
164 [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
165 [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
166 RTE_PTYPE_TUNNEL_GRENAT |
167 RTE_PTYPE_INNER_L2_ETHER |
168 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
169 RTE_PTYPE_INNER_L4_UDP,
170 [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
171 [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
172 RTE_PTYPE_TUNNEL_GRENAT |
173 RTE_PTYPE_INNER_L2_ETHER |
174 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
175 RTE_PTYPE_INNER_L4_TCP,
176 [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
177 [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
178 RTE_PTYPE_TUNNEL_GRENAT |
179 RTE_PTYPE_INNER_L2_ETHER |
180 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
181 RTE_PTYPE_INNER_L4_FRAG,
182 [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
183 [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
184 RTE_PTYPE_TUNNEL_GRENAT |
185 RTE_PTYPE_INNER_L2_ETHER |
186 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
187 RTE_PTYPE_INNER_L4_FRAG,
188 [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
189 [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
190 RTE_PTYPE_TUNNEL_GRENAT |
191 RTE_PTYPE_INNER_L2_ETHER |
192 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
193 RTE_PTYPE_INNER_L4_FRAG,
194 /* All others reserved */
196 cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
197 | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
198 | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
199 return cq_type_table[cqrd_flags + tnl];
203 enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
205 struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
206 uint16_t bwflags, pkt_flags = 0, vlan_tci;
207 bwflags = enic_cq_rx_desc_bwflags(cqrd);
208 vlan_tci = enic_cq_rx_desc_vlan(cqrd);
210 /* VLAN STRIPPED flag. The L2 packet type updated here also */
211 if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
212 pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
213 mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
216 pkt_flags |= PKT_RX_VLAN;
217 mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
219 mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
222 mbuf->vlan_tci = vlan_tci;
224 if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
225 struct cq_enet_rq_clsf_desc *clsf_cqd;
227 clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
228 filter_id = clsf_cqd->filter_id;
230 pkt_flags |= PKT_RX_FDIR;
231 if (filter_id != ENIC_MAGIC_FILTER_ID) {
232 /* filter_id = mark id + 1, so subtract 1 */
233 mbuf->hash.fdir.hi = filter_id - 1;
234 pkt_flags |= PKT_RX_FDIR_ID;
237 } else if (enic_cq_rx_desc_rss_type(cqrd)) {
239 pkt_flags |= PKT_RX_RSS_HASH;
240 mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
244 if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
245 if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
247 l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
250 * When overlay offload is enabled, the NIC may
251 * set ipv4_csum_ok=1 if the inner packet is IPv6..
252 * So, explicitly check for IPv4 before checking
255 if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
256 if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
257 pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
259 pkt_flags |= PKT_RX_IP_CKSUM_BAD;
262 if (l4_flags == RTE_PTYPE_L4_UDP ||
263 l4_flags == RTE_PTYPE_L4_TCP) {
264 if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
265 pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
267 pkt_flags |= PKT_RX_L4_CKSUM_BAD;
272 mbuf->ol_flags = pkt_flags;
#endif /* _ENIC_RXTX_COMMON_H_ */