diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 7dec486fe2..276a2e5596 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -233,10 +233,12 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
 		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
 	} else {
-		if (vlan_tci != 0)
+		if (vlan_tci != 0) {
+			pkt_flags |= PKT_RX_VLAN;
 			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
-		else
+		} else {
 			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+		}
 	}
 
 	mbuf->vlan_tci = vlan_tci;
@@ -310,7 +312,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct vnic_rq *rq;
 	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
 	uint16_t cq_idx;
-	uint16_t rq_idx;
+	uint16_t rq_idx, max_rx;
 	uint16_t rq_num;
 	struct rte_mbuf *nmb, *rxmb;
 	uint16_t nb_rx = 0;
@@ -325,19 +327,23 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
 	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
 	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+	color = cq->last_color;
 
 	data_rq = &enic->rq[sop_rq->data_queue_idx];
 
-	while (nb_rx < nb_pkts) {
+	/* Receive until the end of the ring, at most. */
+	max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
+
+	while (max_rx) {
 		volatile struct rq_enet_desc *rqd_ptr;
 		struct cq_desc cqd;
 		uint8_t packet_error;
 		uint16_t ciflags;
 
+		max_rx--;
+
 		/* Check for pkts available */
-		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-			& CQ_DESC_COLOR_MASK;
-		if (color == cq->last_color)
+		if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
 			break;
 
 		/* Get the cq descriptor and extract rq info from it */
@@ -361,13 +367,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		/* Get the mbuf to return and replace with one just allocated */
 		rxmb = rq->mbuf_ring[rq_idx];
 		rq->mbuf_ring[rq_idx] = nmb;
-
-		/* Increment cqd, rqd, mbuf_table index */
 		cq_idx++;
-		if (unlikely(cq_idx == cq->ring.desc_count)) {
-			cq_idx = 0;
-			cq->last_color = cq->last_color ? 0 : 1;
-		}
 
 		/* Prefetch next mbuf & desc while processing current one */
 		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
@@ -419,6 +419,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type =
 			enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
 		enic_cq_rx_to_pkt_flags(&cqd, first_seg);
+		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
 		if (tnl) {
 			first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
 						    RTE_PTYPE_L4_MASK);
@@ -438,6 +439,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		/* store the mbuf address into the next entry of the array */
 		rx_pkts[nb_rx++] = first_seg;
 	}
+	if (unlikely(cq_idx == cq->ring.desc_count)) {
+		cq_idx = 0;
+		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+	}
 
 	sop_rq->pkt_first_seg = first_seg;
 	sop_rq->pkt_last_seg = last_seg;
@@ -471,29 +476,118 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
-static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+uint16_t
+enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+			 uint16_t nb_pkts)
 {
-	unsigned int desc_count, n, nb_to_free, tail_idx;
-	struct rte_mempool *pool;
-	struct rte_mbuf **m;
+	struct rte_mbuf *mb, **rx, **rxmb;
+	uint16_t cq_idx, nb_rx, max_rx;
+	struct cq_enet_rq_desc *cqd;
+	struct rq_enet_desc *rqd;
+	unsigned int port_id;
+	struct vnic_cq *cq;
+	struct vnic_rq *rq;
+	struct enic *enic;
+	uint8_t color;
+	bool overlay;
+	bool tnl;
 
-	desc_count = wq->ring.desc_count;
-	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
-		     + 1;
-	tail_idx = wq->tail_idx;
-	wq->tail_idx += nb_to_free;
-	wq->ring.desc_avail += nb_to_free;
-	if (wq->tail_idx >= desc_count)
-		wq->tail_idx -= desc_count;
-	/* First, free at most until the end of ring */
-	m = &wq->bufs[tail_idx];
-	pool = (*m)->pool;
-	n = RTE_MIN(nb_to_free, desc_count - tail_idx);
-	rte_mempool_put_bulk(pool, (void **)m, n);
-	n = nb_to_free - n;
-	/* Then wrap and free the rest */
-	if (unlikely(n))
-		rte_mempool_put_bulk(pool, (void **)wq->bufs, n);
+	rq = rx_queue;
+	enic = vnic_dev_priv(rq->vdev);
+	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+	cq_idx = cq->to_clean;
+
+	/*
+	 * Fill up the reserve of free mbufs. Below, we restock the receive
+	 * ring with these mbufs to avoid allocation failures.
+	 */
+	if (rq->num_free_mbufs == 0) {
+		if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+					 ENIC_RX_BURST_MAX))
+			return 0;
+		rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+	}
+
+	/* Receive until the end of the ring, at most. */
+	max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+	max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+	cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+	color = cq->last_color;
+	rxmb = rq->mbuf_ring + cq_idx;
+	port_id = enic->port_id;
+	overlay = enic->overlay_offload;
+
+	rx = rx_pkts;
+	while (max_rx) {
+		max_rx--;
+		if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+			break;
+		if (unlikely(cqd->bytes_written_flags &
+			     CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+			rte_pktmbuf_free(*rxmb++);
+			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+			cqd++;
+			continue;
+		}
+
+		mb = *rxmb++;
+		/* prefetch mbuf data for caller */
+		rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
+				    RTE_PKTMBUF_HEADROOM));
+		mb->data_len = cqd->bytes_written_flags &
+			CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+		mb->pkt_len = mb->data_len;
+		mb->port = port_id;
+		tnl = overlay && (cqd->completed_index_flags &
+				  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+		mb->packet_type =
+			enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
+						     tnl);
+		enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+		if (tnl) {
+			mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+					     RTE_PTYPE_L4_MASK);
+		}
+		cqd++;
+		*rx++ = mb;
+	}
+	/* Number of descriptors visited */
+	nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+	if (nb_rx == 0)
+		return 0;
+	rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+	rxmb = rq->mbuf_ring + cq_idx;
+	cq_idx += nb_rx;
+	rq->rx_nb_hold += nb_rx;
+	if (unlikely(cq_idx == cq->ring.desc_count)) {
+		cq_idx = 0;
+		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+	}
+	cq->to_clean = cq_idx;
+
+	memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+	       sizeof(struct rte_mbuf *) * nb_rx);
+	rq->num_free_mbufs -= nb_rx;
+	while (nb_rx) {
+		nb_rx--;
+		mb = *rxmb++;
+		mb->data_off = RTE_PKTMBUF_HEADROOM;
+		rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rqd++;
+	}
+	if (rq->rx_nb_hold > rq->rx_free_thresh) {
+		rq->posted_index = enic_ring_add(rq->ring.desc_count,
+						 rq->posted_index,
+						 rq->rx_nb_hold);
+		rq->rx_nb_hold = 0;
+		rte_wmb();
+		iowrite32_relaxed(rq->posted_index,
+				  &rq->ctrl->posted_index);
+	}
+
+	return rx - rx_pkts;
 }
 
 static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
@@ -543,10 +637,7 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
 	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
 
 	if (wq->last_completed_index != completed_index) {
-		if (wq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-			enic_fast_free_wq_bufs(wq, completed_index);
-		else
-			enic_free_wq_bufs(wq, completed_index);
+		enic_free_wq_bufs(wq, completed_index);
 		wq->last_completed_index = completed_index;
 	}
 	return 0;
@@ -563,6 +654,10 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (i = 0; i != nb_pkts; i++) {
 		m = tx_pkts[i];
+		if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+			rte_errno = EINVAL;
+			return i;
+		}
 		ol_flags = m->ol_flags;
 		if (ol_flags & wq->tx_offload_notsup_mask) {
 			rte_errno = ENOTSUP;
@@ -614,7 +709,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	wq_desc_avail = vnic_wq_desc_avail(wq);
 	head_idx = wq->head_idx;
 	desc_count = wq->ring.desc_count;
-	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+	ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
 	tx_oversized = &enic->soft_stats.tx_oversized;
 
 	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
@@ -642,7 +737,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		mss = 0;
 		vlan_id = tx_pkt->vlan_tci;
-		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
+		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN);
 		bus_addr = (dma_addr_t)
 			   (tx_pkt->buf_iova + tx_pkt->data_off);
 
@@ -747,12 +842,33 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
 			       struct enic *enic)
 {
 	struct rte_mbuf *p;
+	uint16_t mss;
 
 	while (n) {
 		n--;
 		p = *pkts++;
 		desc->address = p->buf_iova + p->data_off;
 		desc->length = p->pkt_len;
+		/* VLAN insert */
+		desc->vlan_tag = p->vlan_tci;
+		desc->header_length_flags &=
+			((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
+			 (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
+		if (p->ol_flags & PKT_TX_VLAN) {
+			desc->header_length_flags |=
+				1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
+		}
+		/*
+		 * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
+		 * is 0, so no need to set offload_mode.
+		 */
+		mss = 0;
+		if (p->ol_flags & PKT_TX_IP_CKSUM)
+			mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
+		if (p->ol_flags & PKT_TX_L4_MASK)
+			mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
+		desc->mss_loopback = mss;
+
 		/*
 		 * The app should not send oversized
 		 * packets. tx_pkt_prepare includes a check as
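
Usage note (illustration only, not part of the commit): the last hunk makes the simple Tx path honor the per-mbuf offload requests PKT_TX_VLAN, PKT_TX_IP_CKSUM and the PKT_TX_L4_MASK checksums. The sketch below shows how an application might set those requests before handing the mbuf to rte_eth_tx_burst(); it assumes the DPDK 18.08-era flag and struct names used in this diff (PKT_TX_*, struct ether_hdr, struct ipv4_hdr) and that the matching DEV_TX_OFFLOAD_* capabilities were enabled on the port. The helper name is hypothetical.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Hypothetical helper: request VLAN insertion plus IPv4/UDP checksum offload. */
static void request_tx_offloads(struct rte_mbuf *m, uint16_t vlan_tci)
{
	/* The NIC inserts the tag taken from m->vlan_tci. */
	m->vlan_tci = vlan_tci;
	m->ol_flags |= PKT_TX_VLAN;

	/* Checksum offload expects the header lengths to be filled in. */
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
}

With these flags set, enqueue_simple_pkts() above programs the corresponding VLAN-insert and checksum bits into the WQ descriptor, and the hardware does the rest.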