X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ffm10k%2Ffm10k_rxtx.c;h=5c311218395c49bc2ef9ca0dc41ed238dee5c439;hb=57bb45b37bdf3752d95966adbf902b426120eb9d;hp=dd92a91e200f143554f1f6c002d485fb1bb09bc7;hpb=6046898f509732bed5186d8cdfc4502678019b2d;p=dpdk.git

diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index dd92a91e20..5c31121839 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -1,40 +1,12 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
  */
 
 #include <inttypes.h>
 
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_common.h>
+#include <rte_net.h>
 
 #include "fm10k.h"
 #include "base/fm10k_type.h"
@@ -65,6 +37,17 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
 }
 #endif
 
+#define FM10K_TX_OFFLOAD_MASK (  \
+		PKT_TX_VLAN_PKT |        \
+		PKT_TX_IPV6 |            \
+		PKT_TX_IPV4 |            \
+		PKT_TX_IP_CKSUM |        \
+		PKT_TX_L4_MASK |         \
+		PKT_TX_TCP_SEG)
+
+#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
+
 /* @note: When this function is changed, make corresponding change to
  * fm10k_dev_supported_ptypes_get()
  */
@@ -96,6 +79,20 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
 
 	if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
 		m->ol_flags |= PKT_RX_RSS_HASH;
+
+	if (unlikely((d->d.staterr &
+		(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
+		(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
+		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+	else
+		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+	if (unlikely((d->d.staterr &
+		(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
+		(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
+		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+	else
+		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 }
 
 uint16_t
@@ -114,10 +111,10 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
 
 	for (count = 0; count < nb_pkts; ++count) {
+		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+			break;
 		mbuf = q->sw_ring[next_dd];
 		desc = q->hw_ring[next_dd];
-		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
-			break;
 #ifdef RTE_LIBRTE_FM10K_DEBUG_RX
 		dump_rxd(&desc);
 #endif
@@ -134,10 +131,10 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * Packets in fm10k device always carry at least one VLAN tag.
 		 * For those packets coming in without VLAN tag,
 		 * the port default VLAN tag will be used.
-		 * So, always PKT_RX_VLAN_PKT flag is set and vlan_tci
+		 * So, always PKT_RX_VLAN flag is set and vlan_tci
 		 * is valid for each RX packet's mbuf.
 		 */
-		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		mbuf->vlan_tci = desc.w.vlan;
 		/**
 		 * mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -174,7 +171,7 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			q->alloc_thresh);
 
 		if (unlikely(ret != 0)) {
-			uint8_t port = q->port_id;
+			uint16_t port = q->port_id;
 			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
 			/*
 			 * Need to restore next_dd if we cannot allocate new
@@ -228,10 +225,10 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
 	for (count = 0; count < nb_seg; count++) {
+		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+			break;
 		mbuf = q->sw_ring[next_dd];
 		desc = q->hw_ring[next_dd];
-		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
-			break;
 #ifdef RTE_LIBRTE_FM10K_DEBUG_RX
 		dump_rxd(&desc);
 #endif
@@ -295,10 +292,10 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * Packets in fm10k device always carry at least one VLAN tag.
 		 * For those packets coming in without VLAN tag,
 		 * the port default VLAN tag will be used.
-		 * So, always PKT_RX_VLAN_PKT flag is set and vlan_tci
+		 * So, always PKT_RX_VLAN flag is set and vlan_tci
 		 * is valid for each RX packet's mbuf.
 		 */
-		first_seg->ol_flags |= PKT_RX_VLAN_PKT;
+		first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		first_seg->vlan_tci = desc.w.vlan;
 		/**
 		 * mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -332,7 +329,7 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			q->alloc_thresh);
 
 		if (unlikely(ret != 0)) {
-			uint8_t port = q->port_id;
+			uint16_t port = q->port_id;
 			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
 			/*
 			 * Need to restore next_dd if we cannot allocate new
@@ -369,6 +366,33 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rcv;
 }
 
+uint32_t
+fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define FM10K_RXQ_SCAN_INTERVAL 4
+	volatile union fm10k_rx_desc *rxdp;
+	struct fm10k_rx_queue *rxq;
+	uint16_t desc = 0;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxdp = &rxq->hw_ring[rxq->next_dd];
+	while ((desc < rxq->nb_desc) &&
+		rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) {
+		/**
+		 * Check the DD bit of a rx descriptor of each group of 4 desc,
+		 * to avoid checking too frequently and downgrading performance
+		 * too much.
+		 */
+		desc += FM10K_RXQ_SCAN_INTERVAL;
+		rxdp += FM10K_RXQ_SCAN_INTERVAL;
+		if (rxq->next_dd + desc >= rxq->nb_desc)
+			rxdp = &rxq->hw_ring[rxq->next_dd + desc -
+				rxq->nb_desc];
+	}
+
+	return desc;
+}
+
 int
 fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
@@ -394,6 +418,84 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 	return ret;
 }
 
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	volatile union fm10k_rx_desc *rxdp;
+	struct fm10k_rx_queue *rxq = rx_queue;
+	uint16_t nb_hold, trigger_last;
+	uint16_t desc;
+	int ret;
+
+	if (unlikely(offset >= rxq->nb_desc)) {
+		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+		return 0;
+	}
+
+	if (rxq->next_trigger < rxq->alloc_thresh)
+		trigger_last = rxq->next_trigger +
+					rxq->nb_desc - rxq->alloc_thresh;
+	else
+		trigger_last = rxq->next_trigger - rxq->alloc_thresh;
+
+	if (rxq->next_dd < trigger_last)
+		nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
+	else
+		nb_hold = rxq->next_dd - trigger_last;
+
+	if (offset >= rxq->nb_desc - nb_hold)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	desc = rxq->next_dd + offset;
+	if (desc >= rxq->nb_desc)
+		desc -= rxq->nb_desc;
+
+	rxdp = &rxq->hw_ring[desc];
+
+	ret = !!(rxdp->w.status &
+			rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+	return ret;
+}
+
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	volatile struct fm10k_tx_desc *txdp;
+	struct fm10k_tx_queue *txq = tx_queue;
+	uint16_t desc;
+	uint16_t next_rs = txq->nb_desc;
+	struct fifo rs_tracker = txq->rs_tracker;
+	struct fifo *r = &rs_tracker;
+
+	if (unlikely(offset >= txq->nb_desc))
+		return -EINVAL;
+
+	desc = txq->next_free + offset;
+	/* go to next desc that has the RS bit */
+	desc = (desc / txq->rs_thresh + 1) *
+		txq->rs_thresh - 1;
+
+	if (desc >= txq->nb_desc) {
+		desc -= txq->nb_desc;
+		if (desc >= txq->nb_desc)
+			desc -= txq->nb_desc;
+	}
+
+	r->head = r->list;
+	for ( ; r->head != r->endp; ) {
+		if (*r->head >= desc && *r->head < next_rs)
+			next_rs = *r->head;
+		++r->head;
+	}
+
+	txdp = &txq->hw_ring[next_rs];
+	if (txdp->flags & FM10K_TXD_FLAG_DONE)
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
 /*
  * Free multiple TX mbuf at a time if they are in the same pool
  *
@@ -410,12 +512,12 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
 	if (unlikely(num == 0))
 		return;
 
-	m = __rte_pktmbuf_prefree_seg(txep[0]);
+	m = rte_pktmbuf_prefree_seg(txep[0]);
 	if (likely(m != NULL)) {
 		free[0] = m;
 		nb_free = 1;
 		for (i = 1; i < num; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (likely(m != NULL)) {
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
@@ -431,7 +533,7 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
 	} else {
 		for (i = 1; i < num; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i]);
+			m = rte_pktmbuf_prefree_seg(txep[i]);
 			if (m != NULL)
 				rte_mempool_put(m->pool, m);
 			txep[i] = NULL;
@@ -517,8 +619,9 @@ static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
 		rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
 
 	if (mb->ol_flags & PKT_TX_TCP_SEG) {
-		hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
-			mb->l3_len + mb->l4_len;
+		hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+		hdrlen += (mb->ol_flags & PKT_TX_TUNNEL_MASK) ?
+			  mb->outer_l2_len + mb->outer_l3_len : 0;
 		if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
 			hdrlen += sizeof(struct fm10k_ftag);
 
@@ -583,3 +686,41 @@ fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	return count;
 }
+
+uint16_t
+fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	int i, ret;
+	struct rte_mbuf *m;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+
+		if ((m->ol_flags & PKT_TX_TCP_SEG) &&
+				(m->tso_segsz < FM10K_TSO_MINMSS)) {
+			rte_errno = EINVAL;
+			return i;
+		}
+
+		if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+	}
+
+	return i;
+}
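
Note: fm10k_prep_pkts() added above is presumably registered as the port's tx_pkt_prepare callback (the hookup lives in fm10k_ethdev.c, outside this diff), so applications reach it through the generic rte_eth_tx_prepare() API; besides rejecting unsupported offload flags and too-small TSO segment sizes, it also fills in the pseudo-header checksums via rte_net_intel_cksum_prepare() so the hardware can finish the L3/L4 checksum offloads. A minimal usage sketch, not part of this patch — send_burst() and its port_id/queue_id/pkts/nb parameters are hypothetical names for an already configured and started port:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Sketch: validate TX offload requests against what the PMD supports
 * before handing the burst to the NIC.
 */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb)
{
	/* On an fm10k port this ends up in fm10k_prep_pkts(): it stops at
	 * the first mbuf that fails validation and reports the cause in
	 * rte_errno (EINVAL for a TSO segment size below FM10K_TSO_MINMSS,
	 * ENOTSUP for offload flags outside FM10K_TX_OFFLOAD_MASK).
	 */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);

	if (nb_prep != nb)
		printf("tx_prepare rejected mbuf %u: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	/* Transmit only the packets that passed preparation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}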