/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
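
/*
 * Accessors for fields of the ENIC RX completion (CQ) descriptor.
 * Multi-byte descriptor fields are little-endian and are converted
 * with le16_to_cpu()/le32_to_cpu() before use.
 */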
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate RX CQ flags to mbuf packet types. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_UDP,
		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_TCP,
		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_FRAG,
		[16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_UDP,
		[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_TCP,
		[80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_FRAG,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}
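
/*
 * Derive mbuf offload flags (VLAN, RSS hash, L3/L4 checksum status)
 * from the completion descriptor. Only meaningful on EOP descriptors;
 * on non-EOP fragments ol_flags is left cleared.
 */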
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0;
	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);

	mbuf->ol_flags = 0;
	mbuf->vlan_tci = 0;

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN stripping */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
	}

	/* RSS flag */
	if (enic_cq_rx_desc_rss_type(cqrd)) {
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
	    (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
		if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
		}
	}

 mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}
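
/*
 * Receive burst: one CQ/RQ descriptor pair and one mbuf per packet
 * (no RX scatter in this path). Each received mbuf is immediately
 * replaced in the ring with a freshly allocated one, and the RQ
 * posted index is written back only once rx_free_thresh descriptors
 * have accumulated.
 */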
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *rq = rx_queue;
	struct enic *enic = vnic_dev_priv(rq->vdev);
	unsigned int rx_id;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0, nb_err = 0;
	uint16_t nb_hold;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;

	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	rx_id = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;

	nb_hold = rq->rx_nb_hold;	/* mbufs held by software */

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and rq pointer */
		cqd = *cqd_ptr;
		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rx_id];
		rq->mbuf_ring[rx_id] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		rx_id++;
		if (unlikely(rx_id == rq->ring.desc_count)) {
			rx_id = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
		rte_enic_prefetch(cqd_ptr);
		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
				  + rx_id);

		/* Push descriptor for newly allocated mbuf */
		dma_addr = (dma_addr_t)(nmb->buf_physaddr
			   + RTE_PKTMBUF_HEADROOM);
		rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
		rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
				       - RTE_PKTMBUF_HEADROOM);

		/* Drop incoming bad packet */
		if (unlikely(packet_error)) {
			rte_pktmbuf_free(rxmb);
			nb_err++;
			continue;
		}

		/* Fill in the rest of the mbuf */
		rxmb->data_off = RTE_PKTMBUF_HEADROOM;
		rxmb->nb_segs = 1;
		rxmb->next = NULL;
		rxmb->port = enic->port_id;
		rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
		rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, rxmb);
		rxmb->data_len = rxmb->pkt_len;

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = rxmb;
	}

	nb_hold += nb_rx + nb_err;
	cq->to_clean = rx_id;

	if (nb_hold > rq->rx_free_thresh) {
		rq->posted_index = enic_ring_add(rq->ring.desc_count,
				rq->posted_index, nb_hold);
		nb_hold = 0;
		rte_mb();
		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	}

	rq->rx_nb_hold = nb_hold;

	return nb_rx;
}
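
/*
 * Return completed TX mbufs, up to and including completed_index, to
 * their mempools. Puts are batched per source pool so a run of mbufs
 * from the same pool costs a single rte_mempool_put_bulk() call.
 */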
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
		     + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = (struct rte_mbuf *)(buf->mb);
		if (likely(m->pool == pool)) {
			ENIC_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			/* Mbuf from a different pool: flush the batch,
			 * then start a new batch for the new pool.
			 */
			rte_mempool_put_bulk(pool, (void **)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
		buf->mb = NULL;
	}

	rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}
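
/*
 * Poll the WQ completion message that the NIC writes into host memory
 * (cqmsg_rz) and free any newly completed TX buffers.
 */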
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}
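
/*
 * Transmit burst. Each mbuf segment consumes one WQ descriptor; VLAN
 * insertion and IP/TCP/UDP checksum offload are encoded directly in
 * the descriptor. The hardware posted index is written once, after
 * the whole burst has been encoded.
 */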
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		nb_segs = tx_pkt->nb_segs;
		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		mss = 0;
		vlan_id = 0;
		vlan_tag_insert = 0;
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_physaddr + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);

		if (ol_flags & ol_flags_mask) {
			if (ol_flags & PKT_TX_VLAN_PKT) {
				vlan_tag_insert = 1;
				vlan_id = tx_pkt->vlan_tci;
			}

			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* Nic uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
				 eop, 0, vlan_tag_insert, vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			     tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, 0, eop, eop, 0,
						 vlan_tag_insert, vlan_id, 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count,
							  head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	rte_wmb();
	iowrite32(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}