net/enic: count truncated packets
[dpdk.git] / drivers / net / enic / enic_rx.c
/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/*
 * Prefetch a cache line into all cache levels.
 */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

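/*
 * The inline helpers below extract fields and flag bits from the Ethernet
 * RQ completion descriptor (struct cq_enet_rq_desc), converting from
 * little-endian as needed.
 */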
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
        return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
        return(le16_to_cpu(crd->bytes_written_flags) &
                ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
        return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
                CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
        return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
                == CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
        return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
        return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
                CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
        return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
        return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        return le16_to_cpu(cqrd->bytes_written_flags) &
                CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

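/*
 * A completion flagged as a packet error (currently only the truncated
 * flag, typically because the frame was larger than the posted receive
 * buffer) means neither the descriptor contents nor the data can be
 * trusted.
 */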
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t bwflags;

        bwflags = enic_cq_rx_desc_bwflags(cqrd);
        if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
                return 1;
        return 0;
}

/*
 * Lookup table to translate RX CQ flags to mbuf packet types.
 */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint8_t cqrd_flags = cqrd->flags;
        static const uint32_t cq_type_table[128] __rte_cache_aligned = {
                [32] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
                [34] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
                        | RTE_PTYPE_L4_UDP,
                [36] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
                        | RTE_PTYPE_L4_TCP,
                [96] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
                        | RTE_PTYPE_L4_FRAG,
                [16] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
                [18] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
                        | RTE_PTYPE_L4_UDP,
                [20] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
                        | RTE_PTYPE_L4_TCP,
                [80] =  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
                        | RTE_PTYPE_L4_FRAG,
                /* All others reserved */
        };
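        /*
         * The table index is the flags byte masked down to the fragment,
         * IPv4, IPv6, TCP and UDP bits; every combination not listed above
         * maps to 0, i.e. an unknown packet type.
         */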
        cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
                | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
                | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
        return cq_type_table[cqrd_flags];
}

static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t ciflags, bwflags, pkt_flags = 0;
        ciflags = enic_cq_rx_desc_ciflags(cqrd);
        bwflags = enic_cq_rx_desc_bwflags(cqrd);

        mbuf->ol_flags = 0;

        /* flags are meaningless if !EOP */
        if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
                goto mbuf_flags_done;

        /* VLAN stripping */
        if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
                pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
                mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
        } else {
                mbuf->vlan_tci = 0;
        }

        /* RSS flag */
        if (enic_cq_rx_desc_rss_type(cqrd)) {
                pkt_flags |= PKT_RX_RSS_HASH;
                mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
        }

        /* checksum flags */
        if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
                (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
                if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
                        pkt_flags |= PKT_RX_IP_CKSUM_BAD;
                if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
                        if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
                                pkt_flags |= PKT_RX_L4_CKSUM_BAD;
                }
        }

 mbuf_flags_done:
        mbuf->ol_flags = pkt_flags;
}

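/*
 * Add i1 to ring index i0, wrapping around the ring size without a
 * modulo operation.
 */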
static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
        uint32_t d = i0 + i1;
        RTE_ASSERT(i0 < n_descriptors);
        RTE_ASSERT(i1 < n_descriptors);
        d -= (d >= n_descriptors) ? n_descriptors : 0;
        return d;
}


uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
               uint16_t nb_pkts)
{
        struct vnic_rq *rq = rx_queue;
        struct enic *enic = vnic_dev_priv(rq->vdev);
        unsigned int rx_id;
        struct rte_mbuf *nmb, *rxmb;
        uint16_t nb_rx = 0, nb_err = 0;
        uint16_t nb_hold;
        struct vnic_cq *cq;
        volatile struct cq_desc *cqd_ptr;
        uint8_t color;

        cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        rx_id = cq->to_clean;           /* index of cqd, rqd, mbuf_table */
        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;

        nb_hold = rq->rx_nb_hold;       /* mbufs held by software */

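        /*
         * Consume completions until the burst is full, the ring runs dry
         * (color mismatch below), or an mbuf cannot be allocated.  Each
         * received buffer is immediately replaced with a freshly allocated
         * mbuf so its ring entry can be reposted to the NIC.
         */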
        while (nb_rx < nb_pkts) {
                volatile struct rq_enet_desc *rqd_ptr;
                dma_addr_t dma_addr;
                struct cq_desc cqd;
                uint8_t packet_error;

                /* Check for pkts available */
                color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
                        & CQ_DESC_COLOR_MASK;
                if (color == cq->last_color)
                        break;

                /* Get the cq descriptor and rq pointer */
                cqd = *cqd_ptr;
                rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;

                /* allocate a new mbuf */
                nmb = rte_mbuf_raw_alloc(rq->mp);
                if (nmb == NULL) {
                        rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
                        break;
                }

                /* A packet error means descriptor and data are untrusted */
                packet_error = enic_cq_rx_check_err(&cqd);

                /* Get the mbuf to return and replace with one just allocated */
                rxmb = rq->mbuf_ring[rx_id];
                rq->mbuf_ring[rx_id] = nmb;

                /* Increment cqd, rqd, mbuf_table index */
                rx_id++;
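                /*
                 * On wrap, flip the color we expect from the NIC: it writes
                 * the opposite color on its next pass over the ring, so
                 * descriptors left over from the previous pass are not
                 * mistaken for new completions.
                 */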
                if (unlikely(rx_id == rq->ring.desc_count)) {
                        rx_id = 0;
                        cq->last_color = cq->last_color ? 0 : 1;
                }

                /* Prefetch next mbuf & desc while processing current one */
                cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
                rte_enic_prefetch(cqd_ptr);
                rte_enic_prefetch(rq->mbuf_ring[rx_id]);
                rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
                                 + rx_id);

                /* Push descriptor for newly allocated mbuf */
                dma_addr = (dma_addr_t)(nmb->buf_physaddr
                           + RTE_PKTMBUF_HEADROOM);
                rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
                rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
                                       - RTE_PKTMBUF_HEADROOM);

                /*
                 * Drop incoming bad packet.  The replacement buffer was
                 * already posted above, so the ring stays full, and nb_err
                 * keeps the posted-buffer accounting below correct.
                 */
                if (unlikely(packet_error)) {
                        rte_pktmbuf_free(rxmb);
                        nb_err++;
                        continue;
                }

                /* Fill in the rest of the mbuf */
                rxmb->data_off = RTE_PKTMBUF_HEADROOM;
                rxmb->nb_segs = 1;
                rxmb->next = NULL;
                rxmb->port = enic->port_id;
                rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
                rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
                enic_cq_rx_to_pkt_flags(&cqd, rxmb);
                rxmb->data_len = rxmb->pkt_len;

                /* prefetch mbuf data for caller */
                rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));

                /* store the mbuf address into the next entry of the array */
                rx_pkts[nb_rx++] = rxmb;
        }

        nb_hold += nb_rx + nb_err;
        cq->to_clean = rx_id;

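        /*
         * Post the replacement buffers to the NIC only once more than
         * rx_free_thresh of them have accumulated.  The write barrier
         * ensures the descriptor updates are visible before the
         * posted_index doorbell is written.
         */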
        if (nb_hold > rq->rx_free_thresh) {
                rq->posted_index = enic_ring_add(rq->ring.desc_count,
                                rq->posted_index, nb_hold);
                nb_hold = 0;
                rte_mb();
                iowrite32(rq->posted_index, &rq->ctrl->posted_index);
        }

        rq->rx_nb_hold = nb_hold;

        return nb_rx;
}