/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include "enic_vnic_wq.h"
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
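/*
 * Note: the 16-bit completed_index_flags and bytes_written_flags fields each
 * pack a small value (completion index, bytes written) into their low bits
 * and per-packet flag bits into the rest; the two helpers above mask off the
 * value portion so callers can test the flag bits directly.
 */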
static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}
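/*
 * The two checksum-ok helpers above are only meaningful when the adapter
 * actually computed checksums, i.e. when enic_cq_rx_desc_csum_not_calc()
 * returns false; enic_cq_rx_to_pkt_flags() below checks that before trusting
 * them.
 */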
static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;

	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}
/* Lookup table to translate Rx CQ flags to mbuf packet types. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_UDP,
		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_TCP,
		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
			| RTE_PTYPE_L4_FRAG,
		[16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_UDP,
		[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_TCP,
		[80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
			| RTE_PTYPE_L4_FRAG,
		/* All others reserved */
	};

	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}
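/*
 * Index computation example (flag values as defined in cq_enet_desc.h): an
 * IPv4/TCP completion sets CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x20) and
 * CQ_ENET_RQ_DESC_FLAGS_TCP (0x04), so after masking cqrd_flags == 36 and the
 * lookup yields RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 * Any flag combination not listed in the table maps to 0 (unknown ptype).
 */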
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0;

	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN stripping */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
	} else {
		mbuf->vlan_tci = 0;
	}

	/* RSS hash */
	if (enic_cq_rx_desc_rss_type(cqrd)) {
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
	    (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
		if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
		}
	}

mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}
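/*
 * Receive burst: the completion queue (CQ), receive queue (RQ) and mbuf_ring
 * are parallel arrays indexed by rx_id.  Ownership of a CQ entry is signalled
 * by a color bit that flips on every ring wrap-around, so no adapter index
 * register has to be read to detect new work.  Each received mbuf is handed
 * to the caller and immediately replaced in the ring by a newly allocated
 * one.
 */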
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *rq = rx_queue;
	struct enic *enic = vnic_dev_priv(rq->vdev);
	unsigned int rx_id;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0, nb_err = 0;
	uint16_t nb_hold;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;

	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	rx_id = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;

	nb_hold = rq->rx_nb_hold;	/* mbufs held by software */
	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and rq pointer */
		cqd = *cqd_ptr;
		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rx_id];
		rq->mbuf_ring[rx_id] = nmb;
		/* Increment cqd, rqd, mbuf_table index */
		rx_id++;
		if (unlikely(rx_id == rq->ring.desc_count)) {
			rx_id = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
		rte_enic_prefetch(cqd_ptr);
		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
				  + rx_id);

		/* Push descriptor for newly allocated mbuf */
		dma_addr = (dma_addr_t)(nmb->buf_physaddr
			   + RTE_PKTMBUF_HEADROOM);
		rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
		rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
				       - RTE_PKTMBUF_HEADROOM);
		/* Drop incoming bad packet */
		if (unlikely(packet_error)) {
			rte_pktmbuf_free(rxmb);
			nb_err++;
			continue;
		}

		/* Fill in the rest of the mbuf */
		rxmb->data_off = RTE_PKTMBUF_HEADROOM;
		rxmb->nb_segs = 1;
		rxmb->next = NULL;
		rxmb->port = enic->port_id;
		rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
		rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, rxmb);
		rxmb->data_len = rxmb->pkt_len;

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = rxmb;
	}
	nb_hold += nb_rx + nb_err;
	cq->to_clean = rx_id;
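	/*
	 * Post buffers back to the adapter in batches: the posted_index
	 * doorbell write below is deferred until more than rx_free_thresh
	 * descriptors have been refilled, which amortizes the MMIO cost
	 * across packets.
	 */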
	if (nb_hold > rq->rx_free_thresh) {
		rq->posted_index = enic_ring_add(rq->ring.desc_count,
				rq->posted_index, nb_hold);
		nb_hold = 0;
		rte_mb();
		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	}

	rq->rx_nb_hold = nb_hold;

	return nb_rx;
}
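/*
 * Free transmitted mbufs from wq->tail_idx up to and including
 * completed_index.  Consecutive mbufs that came from the same mempool are
 * batched into a single rte_mempool_put_bulk() call; whenever the pool
 * changes, the pending batch is flushed first.
 */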
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
		     + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = (struct rte_mbuf *)(buf->mb);
		if (likely(m->pool == pool)) {
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void **)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
		buf->mb = NULL;
	}

	rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}
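/*
 * Tx completions are reported via a "completion message": the adapter writes
 * the index of the last completed WQ descriptor into the host memory block
 * behind wq->cqmsg_rz, so cleanup only has to compare that value with the
 * index recorded on the previous call.
 */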
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}

	return 0;
}
void enic_post_wq_index(struct vnic_wq *wq)
{
	enic_vnic_post_wq_index(wq);
}
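/*
 * Post one buffer (one packet segment) to the work queue.  sop/eop mark the
 * first and last segment of a packet, and cq_entry asks the adapter to
 * generate a completion for this descriptor; the caller sets it only on
 * selected descriptors to limit completion overhead.
 */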
void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
		   struct rte_mbuf *tx_pkt, unsigned short len,
		   uint8_t sop, uint8_t eop, uint8_t cq_entry,
		   uint16_t ol_flags, uint16_t vlan_tag)
{
	struct wq_enet_desc *desc, *descs;
	uint16_t mss = 0;
	uint8_t vlan_tag_insert = 0;
	uint64_t bus_addr = (dma_addr_t)
	    (tx_pkt->buf_physaddr + tx_pkt->data_off);

	descs = (struct wq_enet_desc *)wq->ring.descs;
	desc = descs + wq->head_idx;

	if (sop) {
		if (ol_flags & PKT_TX_VLAN_PKT)
			vlan_tag_insert = 1;

		if (enic->hw_ip_checksum) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
		}
	}

	wq_enet_desc_enc(desc,
			 bus_addr,
			 len,
			 mss,
			 0 /* header_length */,
			 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
			 eop,
			 cq_entry,
			 0 /* fcoe_encap */,
			 vlan_tag_insert,
			 vlan_tag,
			 0 /* loopback */);

	enic_vnic_post_wq(wq, (void *)tx_pkt, cq_entry);
}
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int frags;
	unsigned int pkt_len;
	unsigned int seg_len;
	unsigned int inc_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt, *next_tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	unsigned short ol_flags;
	uint8_t last_seg, eop;
	unsigned int host_tx_descs = 0;
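	/*
	 * Walk the burst packet by packet and segment by segment.  Each
	 * segment consumes one WQ descriptor; eop marks the last segment of
	 * a packet and last_seg additionally requests a completion entry,
	 * which is only done for the last packet of the burst or after
	 * ENIC_TX_POST_THRESH descriptors to keep completion traffic low.
	 */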
	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		inc_len = 0;
		nb_segs = tx_pkt->nb_segs;
		if (nb_segs > vnic_wq_desc_avail(wq)) {
			if (index > 0)
				enic_post_wq_index(wq);

			/* wq cleanup and try again */
			if (!enic_cleanup_wq(enic, wq) ||
				(nb_segs > vnic_wq_desc_avail(wq))) {
				return index;
			}
		}

		pkt_len = tx_pkt->pkt_len;
		vlan_id = tx_pkt->vlan_tci;
		ol_flags = tx_pkt->ol_flags;
		for (frags = 0; inc_len < pkt_len; frags++) {
			if (!tx_pkt)
				break;
			next_tx_pkt = tx_pkt->next;
			seg_len = tx_pkt->data_len;
			inc_len += seg_len;

			host_tx_descs++;
			last_seg = 0;
			eop = 0;
			if ((pkt_len == inc_len) || !next_tx_pkt) {
				eop = 1;
				/* post if last packet in batch or > thresh */
				if ((index == (nb_pkts - 1)) ||
				    (host_tx_descs > ENIC_TX_POST_THRESH)) {
					last_seg = 1;
					host_tx_descs = 0;
				}
			}
			enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
				      !frags, eop, last_seg, ol_flags, vlan_id);
			tx_pkt = next_tx_pkt;
		}
	}
	enic_cleanup_wq(enic, wq);
	return index;
}
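/*
 * Usage sketch (assumed wiring, normally done in enic_ethdev.c during device
 * initialization): the burst functions above are plugged into the
 * rte_eth_dev so that rte_eth_rx_burst()/rte_eth_tx_burst() reach them, e.g.
 *
 *	eth_dev->rx_pkt_burst = &enic_recv_pkts;
 *	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
 */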