/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"

#define RTE_PMD_USE_PREFETCH
#ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
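
/* Allocate a raw (uninitialized) mbuf from the given mempool; used to refill the RQ. */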
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;

        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);
        return m;
}
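
/* Accessors for fields of the ethernet RQ completion descriptor. */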
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
        return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}
static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
        return le16_to_cpu(crd->bytes_written_flags) &
                ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
        return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
                CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}
static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
        return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
                == CQ_ENET_RQ_DESC_FLAGS_EOP;
}
static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
        return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}
static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}
static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}
static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
        return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
                CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}
static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
        return le32_to_cpu(cqrd->rss_hash);
}
static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
        return le16_to_cpu(cqrd->vlan);
}
static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;

        return le16_to_cpu(cqrd->bytes_written_flags) &
                CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
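
/*
 * Check the completion descriptor for receive errors.  Writes the
 * corresponding mbuf error flags to *pkt_err_flags_out and returns nonzero
 * when the packet was truncated, i.e. descriptor and data are untrusted.
 */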
static inline uint8_t
enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t bwflags;
        uint8_t ret = 0;
        uint64_t pkt_err_flags = 0;

        bwflags = enic_cq_rx_desc_bwflags(cqrd);
        if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
                pkt_err_flags = PKT_RX_MAC_ERR;
                ret = 1;
        }
        *pkt_err_flags_out = pkt_err_flags;
        return ret;
}
/*
 * Lookup table to translate RX CQ flags to the mbuf packet type.
 */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint8_t cqrd_flags = cqrd->flags;
        static const uint32_t cq_type_table[128] __rte_cache_aligned = {
                [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
                [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
                        | RTE_PTYPE_L4_UDP,
                [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
                        | RTE_PTYPE_L4_TCP,
                [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
                        | RTE_PTYPE_L4_FRAG,
                [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
                [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
                        | RTE_PTYPE_L4_UDP,
                [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
                        | RTE_PTYPE_L4_TCP,
                [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
                        | RTE_PTYPE_L4_FRAG,
                /* All others reserved */
        };

        cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
                | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
                | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
        return cq_type_table[cqrd_flags];
}
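
/*
 * Convert completion descriptor flags into mbuf offload flags, VLAN TCI and
 * RSS hash.  Only the EOP descriptor of a packet carries meaningful flags.
 */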
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t ciflags, bwflags, pkt_flags = 0;

        ciflags = enic_cq_rx_desc_ciflags(cqrd);
        bwflags = enic_cq_rx_desc_bwflags(cqrd);

        /* flags are meaningless if !EOP */
        if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
                goto mbuf_flags_done;

        /* VLAN stripping */
        if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
                pkt_flags |= PKT_RX_VLAN_PKT;
                mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
        } else {
                mbuf->vlan_tci = 0;
        }

        /* RSS flag */
        if (enic_cq_rx_desc_rss_type(cqrd)) {
                pkt_flags |= PKT_RX_RSS_HASH;
                mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
        }

        /* checksum flags */
        if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
                (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
                if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
                        pkt_flags |= PKT_RX_IP_CKSUM_BAD;
                if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
                        if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
                                pkt_flags |= PKT_RX_L4_CKSUM_BAD;
                }
        }

mbuf_flags_done:
        mbuf->ol_flags = pkt_flags;
}
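
/* Advance a ring index by i1 slots, wrapping around at n_descriptors. */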
static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
        uint32_t d = i0 + i1;

        ASSERT(i0 < n_descriptors);
        ASSERT(i1 < n_descriptors);
        d -= (d >= n_descriptors) ? n_descriptors : 0;
        return d;
}
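
/*
 * Receive burst: drain up to nb_pkts completed descriptors from the RQ's
 * completion queue, hand the filled mbufs to the caller, and post newly
 * allocated replacement buffers back to the NIC.
 */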
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
               uint16_t nb_pkts)
{
        struct vnic_rq *rq = rx_queue;
        struct enic *enic = vnic_dev_priv(rq->vdev);
        unsigned int rx_id;
        struct rte_mbuf *nmb, *rxmb;
        uint16_t nb_rx = 0;
        uint16_t nb_hold;
        struct vnic_cq *cq;
        volatile struct cq_desc *cqd_ptr;
        uint8_t color;

        cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        rx_id = cq->to_clean;           /* index of cqd, rqd, mbuf_table */
        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;

        nb_hold = rq->rx_nb_hold;       /* mbufs held by software */

        while (nb_rx < nb_pkts) {
                volatile struct rq_enet_desc *rqd_ptr;
                dma_addr_t dma_addr;
                struct cq_desc cqd;
                uint64_t ol_err_flags;
                uint8_t packet_error;

                /* Check for pkts available */
                color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
                        & CQ_DESC_COLOR_MASK;
                if (color == cq->last_color)
                        break;

                /* Get the cq descriptor and rq pointer */
                cqd = *cqd_ptr;
                rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;

                /* allocate a new mbuf */
                nmb = rte_rxmbuf_alloc(rq->mp);
                if (nmb == NULL) {
                        dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
                                enic->port_id, (unsigned)rq->index);
                        rte_eth_devices[enic->port_id].
                                        data->rx_mbuf_alloc_failed++;
                        break;
                }

                /* A packet error means descriptor and data are untrusted */
                packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);

                /* Get the mbuf to return and replace with one just allocated */
                rxmb = rq->mbuf_ring[rx_id];
                rq->mbuf_ring[rx_id] = nmb;

                /* Increment cqd, rqd, mbuf_table index */
                rx_id++;
                if (unlikely(rx_id == rq->ring.desc_count)) {
                        rx_id = 0;
                        cq->last_color = cq->last_color ? 0 : 1;
                }

                /* Prefetch next mbuf & desc while processing current one */
                cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
                rte_enic_prefetch(cqd_ptr);
                rte_enic_prefetch(rq->mbuf_ring[rx_id]);
                rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
                                + rx_id);

                /* Push descriptor for newly allocated mbuf */
                dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
                rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
                rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);

                /* Fill in the rest of the mbuf */
                rxmb->data_off = RTE_PKTMBUF_HEADROOM;
                rxmb->nb_segs = 1;
                rxmb->next = NULL;
                rxmb->port = enic->port_id;
                if (!packet_error) {
                        rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
                        rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
                        enic_cq_rx_to_pkt_flags(&cqd, rxmb);
                } else {
                        rxmb->pkt_len = 0;
                        rxmb->packet_type = 0;
                        rxmb->ol_flags = ol_err_flags;
                }
                rxmb->data_len = rxmb->pkt_len;

                /* prefetch mbuf data for caller */
                rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));

                /* store the mbuf address into the next entry of the array */
                rx_pkts[nb_rx++] = rxmb;
        }

        nb_hold += nb_rx;
        cq->to_clean = rx_id;

        if (nb_hold > rq->rx_free_thresh) {
                rq->posted_index = enic_ring_add(rq->ring.desc_count,
                                rq->posted_index, nb_hold);
                nb_hold = 0;
                rte_mb();
                iowrite32(rq->posted_index, &rq->ctrl->posted_index);
        }

        rq->rx_nb_hold = nb_hold;

        return nb_rx;
}