X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_rxtx.c;h=2fe5a3fa3de0ffb2962ee30ae62348f4ce154c65;hb=4aab7f07a6457b05594995c263cddc237c0d3b38;hp=343dabc644e6b5365b0d75d5ba3a7fcb9f73c4a3;hpb=ed6e564c214e9852ff7f3d8f676a892dda905651;p=dpdk.git

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 343dabc644..2fe5a3fa3d 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -1,37 +1,11 @@
-/* Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
  * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include 
-#include 
+#include 
+#include 
 #include 
 
 #include "enic_compat.h"
@@ -41,6 +15,15 @@
 #include 
 #include 
 
+#define ENIC_TX_OFFLOAD_MASK ( \
+	PKT_TX_VLAN_PKT | \
+	PKT_TX_IP_CKSUM | \
+	PKT_TX_L4_MASK | \
+	PKT_TX_TCP_SEG)
+
+#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
+	(PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
+
 #define RTE_PMD_USE_PREFETCH
 
 #ifdef RTE_PMD_USE_PREFETCH
@@ -132,59 +115,6 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
 		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
 }
 
-/* Find the offset to L5. This is needed by enic TSO implementation.
- * Return 0 if not a TCP packet or can't figure out the length.
- */
-static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
-{
-	struct ether_hdr *eh;
-	struct vlan_hdr *vh;
-	struct ipv4_hdr *ip4;
-	struct ipv6_hdr *ip6;
-	struct tcp_hdr *th;
-	uint8_t hdr_len;
-	uint16_t ether_type;
-
-	/* offset past Ethernet header */
-	eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
-	ether_type = eh->ether_type;
-	hdr_len = sizeof(struct ether_hdr);
-	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
-		vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
-		ether_type = vh->eth_proto;
-		hdr_len += sizeof(struct vlan_hdr);
-	}
-
-	/* offset past IP header */
-	switch (rte_be_to_cpu_16(ether_type)) {
-	case ETHER_TYPE_IPv4:
-		ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
-		if (ip4->next_proto_id != IPPROTO_TCP)
-			return 0;
-		hdr_len += (ip4->version_ihl & 0xf) * 4;
-		break;
-	case ETHER_TYPE_IPv6:
-		ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
-		if (ip6->proto != IPPROTO_TCP)
-			return 0;
-		hdr_len += sizeof(struct ipv6_hdr);
-		break;
-	default:
-		return 0;
-	}
-
-	if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
-		return 0;
-
-	/* offset past TCP header */
-	th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
-	hdr_len += (th->data_off >> 4) * 4;
-
-	if (hdr_len > mbuf->pkt_len)
-		return 0;
-
-	return hdr_len;
-}
 
 static inline uint8_t
 enic_cq_rx_check_err(struct cq_desc *cqd)
@@ -230,20 +160,13 @@ static inline void
 enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
 {
 	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
-	ciflags = enic_cq_rx_desc_ciflags(cqrd);
+	uint16_t bwflags, pkt_flags = 0, vlan_tci;
 	bwflags = enic_cq_rx_desc_bwflags(cqrd);
 	vlan_tci = enic_cq_rx_desc_vlan(cqrd);
 
-	mbuf->ol_flags = 0;
-
-	/* flags are meaningless if !EOP */
-	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-		goto mbuf_flags_done;
-
 	/* VLAN STRIPPED flag. The L2 packet type updated here also */
 	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
 	} else {
 		if (vlan_tci != 0)
@@ -253,27 +176,37 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
 	}
 	mbuf->vlan_tci = vlan_tci;
 
-	/* RSS flag */
-	if (enic_cq_rx_desc_rss_type(cqrd)) {
+	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
+		struct cq_enet_rq_clsf_desc *clsf_cqd;
+		uint16_t filter_id;
+		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
+		filter_id = clsf_cqd->filter_id;
+		if (filter_id) {
+			pkt_flags |= PKT_RX_FDIR;
+			if (filter_id != ENIC_MAGIC_FILTER_ID) {
+				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+				pkt_flags |= PKT_RX_FDIR_ID;
+			}
+		}
+	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
+		/* RSS flag */
 		pkt_flags |= PKT_RX_RSS_HASH;
 		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
 	}
 
 	/* checksum flags */
-	if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
-		if (enic_cq_rx_desc_csum_not_calc(cqrd))
-			pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
-					 PKT_RX_L4_CKSUM_UNKNOWN);
-		else {
+	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
+		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
 			uint32_t l4_flags;
 			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
 
 			if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
 				pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
-			else
+			else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
 				pkt_flags |= PKT_RX_IP_CKSUM_BAD;
 
-			if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+			if (l4_flags == RTE_PTYPE_L4_UDP ||
+			    l4_flags == RTE_PTYPE_L4_TCP) {
 				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
 					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
 				else
@@ -282,7 +215,6 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
 		}
 	}
 
- mbuf_flags_done:
 	mbuf->ol_flags = pkt_flags;
 }
 
@@ -325,7 +257,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	while (nb_rx < nb_pkts) {
 		volatile struct rq_enet_desc *rqd_ptr;
-		dma_addr_t dma_addr;
 		struct cq_desc cqd;
 		uint8_t packet_error;
 		uint16_t ciflags;
@@ -374,19 +305,19 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 		/* Push descriptor for newly allocated mbuf */
 		nmb->data_off = RTE_PKTMBUF_HEADROOM;
-		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
-					RTE_PKTMBUF_HEADROOM);
-		rq_enet_desc_enc(rqd_ptr, dma_addr,
-				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
-				: RQ_ENET_TYPE_NOT_SOP),
-				nmb->buf_len - RTE_PKTMBUF_HEADROOM);
+		/*
+		 * Only the address needs to be refilled. length_type of the
+		 * descriptor it set during initialization
+		 * (enic_alloc_rx_queue_mbufs) and does not change.
+		 */
+		rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
+						    RTE_PKTMBUF_HEADROOM);
 
 		/* Fill in the rest of the mbuf */
 		seg_length = enic_cq_rx_desc_n_bytes(&cqd);
 
 		if (rq->is_sop) {
 			first_seg = rxmb;
-			first_seg->nb_segs = 1;
 			first_seg->pkt_len = seg_length;
 		} else {
 			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
@@ -395,7 +326,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			last_seg->next = rxmb;
 		}
 
-		rxmb->next = NULL;
 		rxmb->port = enic->port_id;
 		rxmb->data_len = seg_length;
 
@@ -473,7 +403,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 	pool = ((struct rte_mbuf *)buf->mb)->pool;
 	for (i = 0; i < nb_to_free; i++) {
 		buf = &wq->bufs[tail_idx];
-		m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
+		m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
 		buf->mb = NULL;
 
 		if (unlikely(m == NULL)) {
@@ -493,7 +423,8 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 		tail_idx = enic_ring_incr(desc_count, tail_idx);
 	}
 
-	rte_mempool_put_bulk(pool, (void **)free, nb_free);
+	if (nb_free > 0)
+		rte_mempool_put_bulk(pool, (void **)free, nb_free);
 
 	wq->tail_idx = tail_idx;
 	wq->ring.desc_avail += nb_to_free;
@@ -512,6 +443,38 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
 	return 0;
 }
 
+uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts)
+{
+	int32_t ret;
+	uint16_t i;
+	uint64_t ol_flags;
+	struct rte_mbuf *m;
+
+	for (i = 0; i != nb_pkts; i++) {
+		m = tx_pkts[i];
+		ol_flags = m->ol_flags;
+		if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = -ENOTSUP;
+			return i;
+		}
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
 {
@@ -535,12 +498,15 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t bus_addr;
 	uint8_t offload_mode;
 	uint16_t header_len;
+	uint64_t tso;
+	rte_atomic64_t *tx_oversized;
 
 	enic_cleanup_wq(enic, wq);
 	wq_desc_avail = vnic_wq_desc_avail(wq);
 	head_idx = wq->head_idx;
 	desc_count = wq->ring.desc_count;
 	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+	tx_oversized = &enic->soft_stats.tx_oversized;
 
 	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
 
@@ -550,10 +516,12 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		data_len = tx_pkt->data_len;
 		ol_flags = tx_pkt->ol_flags;
 		nb_segs = tx_pkt->nb_segs;
+		tso = ol_flags & PKT_TX_TCP_SEG;
 
-		if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
+		/* drop packet if it's too big to send */
+		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
 			rte_pktmbuf_free(tx_pkt);
-			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+			rte_atomic64_inc(tx_oversized);
 			continue;
 		}
 
@@ -564,10 +532,10 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		mss = 0;
-		vlan_id = 0;
-		vlan_tag_insert = 0;
+		vlan_id = tx_pkt->vlan_tci;
+		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
 		bus_addr = (dma_addr_t)
-			   (tx_pkt->buf_physaddr + tx_pkt->data_off);
+			   (tx_pkt->buf_iova + tx_pkt->data_off);
 
 		descs = (struct wq_enet_desc *)wq->ring.descs;
 		desc_p = descs + head_idx;
@@ -576,13 +544,22 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
 		header_len = 0;
 
-		if (tx_pkt->tso_segsz) {
-			header_len = tso_header_len(tx_pkt);
-			if (header_len) {
-				offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
-				mss = tx_pkt->tso_segsz;
+		if (tso) {
+			header_len = tx_pkt->l2_len + tx_pkt->l3_len +
+				     tx_pkt->l4_len;
+
+			/* Drop if non-TCP packet or TSO seg size is too big */
+			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
+			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
+				rte_pktmbuf_free(tx_pkt);
+				rte_atomic64_inc(tx_oversized);
+				continue;
 			}
+
+			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+			mss = tx_pkt->tso_segsz;
 		}
+
 		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
 			if (ol_flags & PKT_TX_IP_CKSUM)
 				mss |= ENIC_CALC_IP_CKSUM;
@@ -596,10 +573,6 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 		}
 
-		if (ol_flags & PKT_TX_VLAN_PKT) {
-			vlan_tag_insert = 1;
-			vlan_id = tx_pkt->vlan_tci;
-		}
 
 		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
 				 offload_mode, eop, eop, 0, vlan_tag_insert,
@@ -619,7 +592,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (tx_pkt->next == NULL)
 				eop = 1;
 			desc_p = descs + head_idx;
-			bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
+			bus_addr = (dma_addr_t)(tx_pkt->buf_iova
 				   + tx_pkt->data_off);
 			wq_enet_desc_enc((struct wq_enet_desc *)
 					 &desc_tmp, bus_addr, data_len,