/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

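/*
 * Allocate a raw (uninitialized) mbuf straight from the pool; the receive
 * path below fills in the header fields itself.
 */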
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}

static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

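/*
 * Helpers that test individual completion-descriptor flag bits (truncation,
 * EOP, checksum validity, FCS) and extract the RSS type, RSS hash, VLAN tag
 * and byte count reported by the adapter.
 */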
static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint8_t
enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_FCS_OK;
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;

	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

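/*
 * Translate completion error indications into mbuf error flags. Both a
 * truncated frame and a bad FCS are reported as PKT_RX_MAC_ERR, the closest
 * flag the mbuf API offers.
 */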
static inline uint64_t
enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;
	uint64_t pkt_err_flags = 0;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);

	/* Check for packet error. Can't be more specific than MAC error */
	if (enic_cq_rx_desc_packet_error(bwflags)) {
		pkt_err_flags |= PKT_RX_MAC_ERR;
	}

	/* Check for bad FCS. MAC error isn't quite right, but no other choice */
	if (!enic_cq_rx_desc_fcs_ok(cqrd)) {
		pkt_err_flags |= PKT_RX_MAC_ERR;
	}
	return pkt_err_flags;
}

/*
 * Lookup table to translate RX CQ flags to mbuf packet types.
 */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_UDP,
		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_TCP,
		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_FRAG,
		[16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_UDP,
		[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_TCP,
		[80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_FRAG,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}

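/*
 * Convert completion descriptor flags into mbuf offload flags (VLAN
 * stripping, RSS hash, IP/L4 checksum errors). The flags are only valid on
 * EOP descriptors, so non-EOP completions leave ol_flags cleared.
 */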
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0;
	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);

	ASSERT(mbuf->ol_flags == 0);

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN stripping */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT;
		mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
	} else {
		mbuf->vlan_tci = 0;
	}

	/* RSS flag */
	if (enic_cq_rx_desc_rss_type(cqrd)) {
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
	    (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
		if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
		}
	}

mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}

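/* Add two ring indexes, wrapping around the descriptor count. */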
static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	uint32_t d = i0 + i1;
	ASSERT(i0 < n_descriptors);
	ASSERT(i1 < n_descriptors);
	d -= (d >= n_descriptors) ? n_descriptors : 0;
	return d;
}

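/*
 * Burst receive: drain completed descriptors from the completion queue,
 * hand the filled mbufs to the caller, replace each one with a freshly
 * allocated mbuf, and post the replenished descriptors back to the adapter
 * once more than rx_free_thresh descriptors have accumulated.
 */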
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *rq = rx_queue;
	struct enic *enic = vnic_dev_priv(rq->vdev);
	unsigned int rx_id;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	uint16_t nb_hold;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;

	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	rx_id = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;

	nb_hold = rq->rx_nb_hold;	/* mbufs held by software */

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		struct cq_desc cqd;
		dma_addr_t dma_addr;
		uint16_t rx_pkt_len;
		uint64_t ol_err_flags;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and rq pointer */
		cqd = *cqd_ptr;
		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;

		/* allocate a new mbuf */
		nmb = rte_rxmbuf_alloc(rq->mp);
		if (nmb == NULL) {
			dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
				enic->port_id, (unsigned)rq->index);
			rte_eth_devices[enic->port_id].
					data->rx_mbuf_alloc_failed++;
			break;
		}

		/* Check for FCS or packet errors */
		ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd);
		if (ol_err_flags == 0)
			rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
		else
			rx_pkt_len = 0;

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rx_id];
		rq->mbuf_ring[rx_id] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		rx_id++;
		if (unlikely(rx_id == rq->ring.desc_count)) {
			rx_id = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
		rte_enic_prefetch(cqd_ptr);
		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
				  + rx_id);

		/* Push descriptor for newly allocated mbuf */
		dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
		rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
		rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);

		/* Fill in the rest of the mbuf */
		rxmb->data_off = RTE_PKTMBUF_HEADROOM;
		rxmb->nb_segs = 1;
		rxmb->next = NULL;
		rxmb->pkt_len = rx_pkt_len;
		rxmb->data_len = rx_pkt_len;
		rxmb->port = enic->port_id;
		rxmb->ol_flags = ol_err_flags;
		if (!ol_err_flags)
			enic_cq_rx_to_pkt_flags(&cqd, rxmb);
		rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = rxmb;
	}

	nb_hold += nb_rx;
	cq->to_clean = rx_id;

	if (nb_hold > rq->rx_free_thresh) {
		rq->posted_index = enic_ring_add(rq->ring.desc_count,
				rq->posted_index, nb_hold);
		nb_hold = 0;
		rte_mb();
		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	}

	rq->rx_nb_hold = nb_hold;

	return nb_rx;
}