diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c old mode 100755 new mode 100644 index 30a3aafb29..8559ef6657 --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c @@ -1,40 +1,39 @@ /*- * BSD LICENSE - * - * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright 2014 6WIND S.A. * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions * are met: - * - * * Redistributions of source code must retain the above copyright + * + * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * */ #include -#include #include #include #include @@ -78,13 +77,26 @@ #include "ixgbe/ixgbe_vf.h" #include "ixgbe_ethdev.h" #include "ixgbe/ixgbe_dcb.h" - - -#define RTE_PMD_IXGBE_TX_MAX_BURST 32 - -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC -#define RTE_PMD_IXGBE_RX_MAX_BURST 32 -#endif +#include "ixgbe/ixgbe_common.h" +#include "ixgbe_rxtx.h" + +#define IXGBE_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_IPV4_TCP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV4_UDP | \ + ETH_RSS_IPV6_UDP | \ + ETH_RSS_IPV6_UDP_EX) + +/* Bit Mask to indicate what bits required for building TX context */ +#define IXGBE_TX_OFFLOAD_MASK ( \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG) static inline struct rte_mbuf * rte_rxmbuf_alloc(struct rte_mempool *mp) @@ -92,117 +104,10 @@ rte_rxmbuf_alloc(struct rte_mempool *mp) struct rte_mbuf *m; m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0); + __rte_mbuf_sanity_check_raw(m, 0); return (m); } -#define RTE_MBUF_DATA_DMA_ADDR(mb) \ - (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \ - (char *)(mb)->buf_addr)) - -#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \ - (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM) - -/** - * Structure associated with each descriptor of the RX ring of a RX queue. - */ -struct igb_rx_entry { - struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ -}; - -/** - * Structure associated with each descriptor of the TX ring of a TX queue. - */ -struct igb_tx_entry { - struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ - uint16_t next_id; /**< Index of next descriptor in ring. */ - uint16_t last_id; /**< Index of last scattered descriptor. */ -}; - -/** - * Structure associated with each RX queue. - */ -struct igb_rx_queue { - struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ - volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */ - uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ - volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ - struct igb_rx_entry *sw_ring; /**< address of RX software ring. */ - struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ - struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ - uint16_t nb_rx_desc; /**< number of RX descriptors. */ - uint16_t rx_tail; /**< current value of RDT register. */ - uint16_t nb_rx_hold; /**< number of held free RX desc. */ -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC - uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */ - uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */ - uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ -#endif - uint16_t rx_free_thresh; /**< max free RX desc to hold. */ - uint16_t queue_id; /**< RX queue index. */ - uint8_t port_id; /**< Device port identifier. */ - uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ - uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. 
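[Editor's sketch, not part of the patch.] The two RTE_MBUF_DATA_DMA_ADDR macros removed in the hunk above compute the bus address handed to the NIC: the buffer's physical address plus the offset of the payload inside that buffer. A minimal sketch of the same computation, with a hypothetical helper name and the payload pointer passed explicitly so it is independent of whether the payload lives at mb->pkt.data (old layout) or buf_addr + data_off (new layout):

    #include <stdint.h>

    /* Hypothetical stand-in for struct rte_mbuf: only the two fields
     * the address computation needs. */
    struct mbuf_like {
            void     *buf_addr;      /* virtual address of the buffer  */
            uint64_t  buf_physaddr;  /* physical address of the buffer */
    };

    /* DMA address of payload = physical base + (payload - virtual base). */
    static inline uint64_t
    mbuf_data_dma_addr(const struct mbuf_like *mb, const void *data)
    {
            return mb->buf_physaddr +
                   (uint64_t)((const char *)data - (const char *)mb->buf_addr);
    }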
*/ -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC - /** need to alloc dummy mbuf, for wraparound when scanning hw ring */ - struct rte_mbuf fake_mbuf; - /** hold packets to return to application */ - struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2]; -#endif -}; - -/** - * IXGBE CTX Constants - */ -enum ixgbe_advctx_num { - IXGBE_CTX_0 = 0, /**< CTX0 */ - IXGBE_CTX_1 = 1, /**< CTX1 */ - IXGBE_CTX_NUM = 2, /**< CTX NUMBER */ -}; - -/** - * Structure to check if new context need be built - */ - -struct ixgbe_advctx_info { - uint16_t flags; /**< ol_flags for context build. */ - uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */ - union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */ -}; - -/** - * Structure associated with each TX queue. - */ -struct igb_tx_queue { - /** TX ring virtual address. */ - volatile union ixgbe_adv_tx_desc *tx_ring; - uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ - struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */ - volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ - uint16_t nb_tx_desc; /**< number of TX descriptors. */ - uint16_t tx_tail; /**< current value of TDT reg. */ - uint16_t tx_free_thresh;/**< minimum TX before freeing. */ - /** Number of TX descriptors to use before RS bit is set. */ - uint16_t tx_rs_thresh; - /** Number of TX descriptors used since RS bit was set. */ - uint16_t nb_tx_used; - /** Index to last TX descriptor to have been cleaned. */ - uint16_t last_desc_cleaned; - /** Total number of TX descriptors ready to be allocated. */ - uint16_t nb_tx_free; - uint16_t tx_next_dd; /**< next desc to scan for DD bit */ - uint16_t tx_next_rs; /**< next desc to set RS bit */ - uint16_t queue_id; /**< TX queue index. */ - uint8_t port_id; /**< Device port identifier. */ - uint8_t pthresh; /**< Prefetch threshold register. */ - uint8_t hthresh; /**< Host threshold register. */ - uint8_t wthresh; /**< Write-back threshold reg. */ - uint32_t txq_flags; /**< Holds flags for this TXq */ - uint32_t ctx_curr; /**< Hardware context states. */ - /** Hardware context0 history. */ - struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; -}; - #if 1 #define RTE_PMD_USE_PREFETCH @@ -217,37 +122,17 @@ struct igb_tx_queue { #define rte_ixgbe_prefetch(p) do {} while(0) #endif -#ifdef RTE_PMD_PACKET_PREFETCH -#define rte_packet_prefetch(p) rte_prefetch1(p) -#else -#define rte_packet_prefetch(p) do {} while(0) -#endif - /********************************************************************* * * TX functions * **********************************************************************/ -/* - * The "simple" TX queue functions require that the following - * flags are set when the TX queue is configured: - * - ETH_TXQ_FLAGS_NOMULTSEGS - * - ETH_TXQ_FLAGS_NOVLANOFFL - * - ETH_TXQ_FLAGS_NOXSUMSCTP - * - ETH_TXQ_FLAGS_NOXSUMUDP - * - ETH_TXQ_FLAGS_NOXSUMTCP - * and that the RS bit threshold (tx_rs_thresh) is at least equal to - * RTE_PMD_IXGBE_TX_MAX_BURST. - */ -#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) - /* * Check for descriptors with their DD bit set and free mbufs. * Return the total number of buffers freed. 
*/ -static inline int +static inline int __attribute__((always_inline)) ixgbe_tx_free_bufs(struct igb_tx_queue *txq) { struct igb_tx_entry *txep; @@ -265,13 +150,10 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq) */ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]); - /* prefetch the mbufs that are about to be freed */ - for (i = 0; i < txq->tx_rs_thresh; ++i) - rte_prefetch0((txep + i)->mbuf); - /* free buffers one at a time */ if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) { for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + txep->mbuf->next = NULL; rte_mempool_put(txep->mbuf->pool, txep->mbuf); txep->mbuf = NULL; } @@ -283,27 +165,14 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq) } /* buffers were freed, update counters */ - txq->nb_tx_free += txq->tx_rs_thresh; - txq->tx_next_dd += txq->tx_rs_thresh; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); if (txq->tx_next_dd >= txq->nb_tx_desc) - txq->tx_next_dd = txq->tx_rs_thresh - 1; + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); return txq->tx_rs_thresh; } -/* - * Populate descriptors with the following info: - * 1.) buffer_addr = phys_addr + headroom - * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len - * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT - */ - -/* Defines for Tx descriptor */ -#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\ - IXGBE_ADVTXD_DCMD_IFCS |\ - IXGBE_ADVTXD_DCMD_DEXT |\ - IXGBE_ADVTXD_DCMD_EOP) - /* Populate 4 descriptors with data from 4 mbufs */ static inline void tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) @@ -314,7 +183,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) for (i = 0; i < 4; ++i, ++txdp, ++pkts) { buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts); - pkt_len = (*pkts)->pkt.data_len; + pkt_len = (*pkts)->data_len; /* write data to descriptor */ txdp->read.buffer_addr = buf_dma_addr; @@ -322,6 +191,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) ((uint32_t)DCMD_DTYP_FLAGS | pkt_len); txdp->read.olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_prefetch0(&(*pkts)->pool); } } @@ -333,7 +203,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) uint32_t pkt_len; buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts); - pkt_len = (*pkts)->pkt.data_len; + pkt_len = (*pkts)->data_len; /* write data to descriptor */ txdp->read.buffer_addr = buf_dma_addr; @@ -341,6 +211,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) ((uint32_t)DCMD_DTYP_FLAGS | pkt_len); txdp->read.olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_prefetch0(&(*pkts)->pool); } /* @@ -397,12 +268,12 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ixgbe_tx_free_bufs(txq); /* Only use descriptors that are available */ - nb_pkts = RTE_MIN(txq->nb_tx_free, nb_pkts); + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); if (unlikely(nb_pkts == 0)) return 0; /* Use exactly nb_pkts descriptors */ - txq->nb_tx_free -= nb_pkts; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); /* * At this point, we know there are enough descriptors in the @@ -417,7 +288,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * the processing looks just like the "bottom" part anyway... 
*/ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { - n = txq->nb_tx_desc - txq->tx_tail; + n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); ixgbe_tx_fill_hw_ring(txq, tx_pkts, n); /* @@ -427,14 +298,14 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, */ tx_r[txq->tx_next_rs].read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); - txq->tx_next_rs = txq->tx_rs_thresh - 1; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); txq->tx_tail = 0; } /* Fill H/W descriptor ring with mbuf data */ - ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, nb_pkts - n); - txq->tx_tail += (nb_pkts - n); + ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); + txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); /* * Determine if RS bit should be set @@ -446,9 +317,10 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, if (txq->tx_tail > txq->tx_next_rs) { tx_r[txq->tx_next_rs].read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); - txq->tx_next_rs += txq->tx_rs_thresh; + txq->tx_next_rs = (uint16_t)(txq->tx_next_rs + + txq->tx_rs_thresh); if (txq->tx_next_rs >= txq->nb_tx_desc) - txq->tx_next_rs = txq->tx_rs_thresh - 1; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); } /* @@ -479,10 +351,10 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, nb_tx = 0; while (nb_pkts) { uint16_t ret, n; - n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST); + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST); ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n); - nb_tx += ret; - nb_pkts -= ret; + nb_tx = (uint16_t)(nb_tx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); if (ret < n) break; } @@ -493,59 +365,84 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, static inline void ixgbe_set_xmit_ctx(struct igb_tx_queue* txq, volatile struct ixgbe_adv_tx_context_desc *ctx_txd, - uint16_t ol_flags, uint32_t vlan_macip_lens) + uint64_t ol_flags, union ixgbe_tx_offload tx_offload) { uint32_t type_tucmd_mlhl; - uint32_t mss_l4len_idx; + uint32_t mss_l4len_idx = 0; uint32_t ctx_idx; - uint32_t cmp_mask; + uint32_t vlan_macip_lens; + union ixgbe_tx_offload tx_offload_mask; ctx_idx = txq->ctx_curr; - cmp_mask = 0; + tx_offload_mask.data = 0; type_tucmd_mlhl = 0; + /* Specify which HW CTX to upload. */ + mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT); + if (ol_flags & PKT_TX_VLAN_PKT) { - cmp_mask |= TX_VLAN_CMP_MASK; + tx_offload_mask.vlan_tci = ~0; } - if (ol_flags & PKT_TX_IP_CKSUM) { - type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4; - cmp_mask |= TX_MAC_LEN_CMP_MASK; - } + /* check if TCP segmentation required for this packet */ + if (ol_flags & PKT_TX_TCP_SEG) { + /* implies IP cksum and TCP cksum */ + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 | + IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + + tx_offload_mask.l2_len = ~0; + tx_offload_mask.l3_len = ~0; + tx_offload_mask.l4_len = ~0; + tx_offload_mask.tso_segsz = ~0; + mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT; + } else { /* no TSO, check if hardware checksum is needed */ + if (ol_flags & PKT_TX_IP_CKSUM) { + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4; + tx_offload_mask.l2_len = ~0; + tx_offload_mask.l3_len = ~0; + } - /* Specify which HW CTX to upload. 
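[Editor's sketch, not part of the patch.] The ixgbe_xmit_pkts_simple() changes above keep the burst-splitting loop: the ring-level routine is handed at most RTE_PMD_IXGBE_TX_MAX_BURST (32) packets at a time and the loop stops as soon as a chunk comes back short (ring full). The patch also wraps the counters in explicit uint16_t casts. A condensed sketch of that pattern; xmit_chunk() is a hypothetical stand-in for tx_xmit_pkts():

    #include <stdint.h>

    #define MAX_BURST 32

    /* Assumed ring-level routine: transmits up to n packets,
     * returns how many it actually accepted. */
    uint16_t xmit_chunk(void *txq, void **pkts, uint16_t n);

    static uint16_t
    xmit_all(void *txq, void **pkts, uint16_t nb_pkts)
    {
            uint16_t nb_tx = 0;

            while (nb_pkts) {
                    uint16_t n = (uint16_t)(nb_pkts < MAX_BURST ?
                                            nb_pkts : MAX_BURST);
                    uint16_t ret = xmit_chunk(txq, &pkts[nb_tx], n);

                    nb_tx = (uint16_t)(nb_tx + ret);
                    nb_pkts = (uint16_t)(nb_pkts - ret);
                    if (ret < n)    /* ring could not take a full chunk */
                            break;
            }
            return nb_tx;
    }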
*/ - mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT); - switch (ol_flags & PKT_TX_L4_MASK) { - case PKT_TX_UDP_CKSUM: - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP | + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; - cmp_mask |= TX_MACIP_LEN_CMP_MASK; - break; - case PKT_TX_TCP_CKSUM: - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | + mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len = ~0; + tx_offload_mask.l3_len = ~0; + break; + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; - cmp_mask |= TX_MACIP_LEN_CMP_MASK; - break; - case PKT_TX_SCTP_CKSUM: - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | + mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len = ~0; + tx_offload_mask.l3_len = ~0; + tx_offload_mask.l4_len = ~0; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; - cmp_mask |= TX_MACIP_LEN_CMP_MASK; - break; - default: - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV | + mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len = ~0; + tx_offload_mask.l3_len = ~0; + break; + default: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - break; + break; + } } txq->ctx_cache[ctx_idx].flags = ol_flags; - txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask; - txq->ctx_cache[ctx_idx].vlan_macip_lens.data = - vlan_macip_lens & cmp_mask; + txq->ctx_cache[ctx_idx].tx_offload.data = + tx_offload_mask.data & tx_offload.data; + txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask; ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); + vlan_macip_lens = tx_offload.l3_len; + vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT); + vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT); ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); ctx_txd->seqnum_seed = 0; @@ -556,21 +453,21 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq, * or create a new context descriptor. 
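[Editor's sketch, not part of the patch.] The what_advctx_update() helper that follows relies on the 82599's two per-queue TX context slots: check the slot used last, flip to the other, and only report a miss (IXGBE_CTX_NUM) when neither matches, in which case the caller builds a fresh context descriptor in the current slot. A reduced sketch of that two-slot cache policy with hypothetical names:

    #include <stdint.h>

    #define CTX_NUM 2       /* the hardware exposes two context slots */

    struct ctx_cache {
            uint64_t flags;  /* ol_flags the slot was built for       */
            uint64_t data;   /* masked offload data stored at build   */
            uint64_t mask;   /* which offload bits matter to the slot */
    };

    /* Return the matching slot index, or CTX_NUM if a new context
     * descriptor must be written. 'curr' flips between 0 and 1. */
    static uint32_t
    ctx_lookup(struct ctx_cache cache[CTX_NUM], uint32_t *curr,
               uint64_t flags, uint64_t offload)
    {
            if (cache[*curr].flags == flags &&
                cache[*curr].data == (cache[*curr].mask & offload))
                    return *curr;

            *curr ^= 1;     /* try the other slot */
            if (cache[*curr].flags == flags &&
                cache[*curr].data == (cache[*curr].mask & offload))
                    return *curr;

            return CTX_NUM; /* miss: caller rebuilds in slot *curr */
    }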
*/ static inline uint32_t -what_advctx_update(struct igb_tx_queue *txq, uint16_t flags, - uint32_t vlan_macip_lens) +what_advctx_update(struct igb_tx_queue *txq, uint64_t flags, + union ixgbe_tx_offload tx_offload) { /* If match with the current used context */ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && - (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data == - (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) { + (txq->ctx_cache[txq->ctx_curr].tx_offload.data == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { return txq->ctx_curr; } /* What if match with the next context */ txq->ctx_curr ^= 1; if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && - (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data == - (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) { + (txq->ctx_cache[txq->ctx_curr].tx_offload.data == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { return txq->ctx_curr; } @@ -579,22 +476,27 @@ what_advctx_update(struct igb_tx_queue *txq, uint16_t flags, } static inline uint32_t -tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags) +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) { - static const uint32_t l4_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_TXSM}; - static const uint32_t l3_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_IXSM}; - uint32_t tmp; - - tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; - tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + uint32_t tmp = 0; + if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) + tmp |= IXGBE_ADVTXD_POPTS_TXSM; + if (ol_flags & PKT_TX_IP_CKSUM) + tmp |= IXGBE_ADVTXD_POPTS_IXSM; + if (ol_flags & PKT_TX_TCP_SEG) + tmp |= IXGBE_ADVTXD_POPTS_TXSM; return tmp; } static inline uint32_t -tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags) +tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) { - static const uint32_t vlan_cmd[2] = {0, IXGBE_ADVTXD_DCMD_VLE}; - return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0]; + uint32_t cmdtype = 0; + if (ol_flags & PKT_TX_VLAN_PKT) + cmdtype |= IXGBE_ADVTXD_DCMD_VLE; + if (ol_flags & PKT_TX_TCP_SEG) + cmdtype |= IXGBE_ADVTXD_DCMD_TSE; + return cmdtype; } /* Default RS bit threshold values */ @@ -617,9 +519,9 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq) uint16_t nb_tx_to_clean; /* Determine the last descriptor needing to be cleaned */ - desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh; + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); if (desc_to_clean_to >= nb_tx_desc) - desc_to_clean_to = desc_to_clean_to - nb_tx_desc; + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); /* Check to make sure the last descriptor to clean is done */ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; @@ -636,10 +538,11 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq) /* Figure out how many descriptors will be cleaned */ if (last_desc_cleaned > desc_to_clean_to) - nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) + - desc_to_clean_to); + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); else - nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned; + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); PMD_TX_FREE_LOG(DEBUG, "Cleaning %4u TX descriptors: %4u to %4u " @@ -657,7 +560,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq) /* Update the txq to reflect the last descriptor that was cleaned */ txq->last_desc_cleaned = desc_to_clean_to; - txq->nb_tx_free += nb_tx_to_clean; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + 
nb_tx_to_clean); /* No Error */ return (0); @@ -679,15 +582,15 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint32_t cmd_type_len; uint32_t pkt_len; uint16_t slen; - uint16_t ol_flags; + uint64_t ol_flags; uint16_t tx_id; uint16_t tx_last; uint16_t nb_tx; uint16_t nb_used; - uint16_t tx_ol_req; - uint32_t vlan_macip_lens; - uint32_t ctx; + uint64_t tx_ol_req; + uint32_t ctx = 0; uint32_t new_ctx; + union ixgbe_tx_offload tx_offload = { .data = 0 }; txq = tx_queue; sw_ring = txq->sw_ring; @@ -700,27 +603,32 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ixgbe_xmit_cleanup(txq); } + rte_prefetch0(&txe->mbuf->pool); + /* TX loop */ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { new_ctx = 0; tx_pkt = *tx_pkts++; - pkt_len = tx_pkt->pkt.pkt_len; - - RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + pkt_len = tx_pkt->pkt_len; /* * Determine how many (if any) context descriptors * are needed for offload functionality. */ ol_flags = tx_pkt->ol_flags; - vlan_macip_lens = tx_pkt->pkt.vlan_macip.data; /* If hardware offload required */ - tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK; + tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK; if (tx_ol_req) { + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + /* If new context need be built or reuse the exist ctx. */ ctx = what_advctx_update(txq, tx_ol_req, - vlan_macip_lens); + tx_offload); /* Only allocate context descriptor if required*/ new_ctx = (ctx == IXGBE_CTX_NUM); ctx = txq->ctx_curr; @@ -731,7 +639,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * This will always be the number of segments + the number of * Context descriptors required to transmit the packet */ - nb_used = tx_pkt->pkt.nb_segs + new_ctx; + nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx); /* * The number of descriptors that must be allocated for a @@ -748,7 +656,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" - " tx_first=%u tx_last=%u\n", + " tx_first=%u tx_last=%u", (unsigned) txq->port_id, (unsigned) txq->queue_id, (unsigned) pkt_len, @@ -835,13 +743,22 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, */ cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; - olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + #ifdef RTE_LIBRTE_IEEE1588 if (ol_flags & PKT_TX_IEEE1588_TMST) cmd_type_len |= IXGBE_ADVTXD_MAC_1588; #endif + olinfo_status = 0; if (tx_ol_req) { + + if (ol_flags & PKT_TX_TCP_SEG) { + /* when TSO is on, paylen in descriptor is the + * not the packet len but the tcp payload len */ + pkt_len -= (tx_offload.l2_len + + tx_offload.l3_len + tx_offload.l4_len); + } + /* * Setup the TX Advanced Context Descriptor if required */ @@ -854,7 +771,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, &txr[tx_id]; txn = &sw_ring[txe->next_id]; - RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + rte_prefetch0(&txn->mbuf->pool); if (txe->mbuf != NULL) { rte_pktmbuf_free_seg(txe->mbuf); @@ -862,7 +779,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, - vlan_macip_lens); + tx_offload); txe->last_id = tx_last; tx_id = txe->next_id; @@ -874,15 +791,18 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * This path will go through * whatever new/reuse the context descriptor */ 
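[Editor's sketch, not part of the patch.] The PAYLEN adjustment added above is easy to misread: with TSO enabled, the descriptor's PAYLEN field carries only the TCP payload length, not the frame length, so the headers are subtracted first. A worked sketch under the usual plain Ethernet/IPv4/TCP header sizes:

    #include <stdint.h>

    /* For TSO, PAYLEN = total packet length minus L2+L3+L4 headers. */
    static uint32_t
    tso_paylen(uint32_t pkt_len, uint8_t l2_len, uint8_t l3_len,
               uint8_t l4_len)
    {
            return pkt_len - (uint32_t)(l2_len + l3_len + l4_len);
    }

    /* Example: a 1514-byte frame with a 14 B Ethernet header, 20 B IPv4
     * header and 20 B TCP header yields PAYLEN = 1514 - 54 = 1460. */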
- cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags); + cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags); olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags); olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT; } + olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + m_seg = tx_pkt; do { txd = &txr[tx_id]; txn = &sw_ring[txe->next_id]; + rte_prefetch0(&txn->mbuf->pool); if (txe->mbuf != NULL) rte_pktmbuf_free_seg(txe->mbuf); @@ -891,7 +811,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* * Set up Transmit Data Descriptor. */ - slen = m_seg->pkt.data_len; + slen = m_seg->data_len; buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg); txd->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); @@ -902,15 +822,15 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, txe->last_id = tx_last; tx_id = txe->next_id; txe = txn; - m_seg = m_seg->pkt.next; + m_seg = m_seg->next; } while (m_seg != NULL); /* * The last packet data descriptor needs End Of Packet (EOP) */ cmd_type_len |= IXGBE_TXD_CMD_EOP; - txq->nb_tx_used += nb_used; - txq->nb_tx_free -= nb_used; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); /* Set RS bit only on threshold packets' last descriptor */ if (txq->nb_tx_used >= txq->tx_rs_thresh) { @@ -946,19 +866,19 @@ end_of_tx: * RX functions * **********************************************************************/ -static inline uint16_t +static inline uint64_t rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs) { - uint16_t pkt_flags; + uint64_t pkt_flags; - static uint16_t ip_pkt_types_map[16] = { + static uint64_t ip_pkt_types_map[16] = { 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV6_HDR, 0, 0, 0, PKT_RX_IPV6_HDR_EXT, 0, 0, 0, PKT_RX_IPV6_HDR_EXT, 0, 0, 0, }; - static uint16_t ip_rss_types_map[16] = { + static uint64_t ip_rss_types_map[16] = { 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0, 0, 0, @@ -966,49 +886,49 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs) }; #ifdef RTE_LIBRTE_IEEE1588 - static uint32_t ip_pkt_etqf_map[8] = { + static uint64_t ip_pkt_etqf_map[8] = { 0, 0, 0, PKT_RX_IEEE1588_PTP, 0, 0, 0, 0, }; - pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? - ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] : - ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]); + pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? + ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] : + ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]; #else - pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 : - ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]); + pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 : + ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]; #endif - return (pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]); + return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]; } -static inline uint16_t +static inline uint64_t rx_desc_status_to_pkt_flags(uint32_t rx_status) { - uint16_t pkt_flags; + uint64_t pkt_flags; /* * Check if VLAN present only. * Do not check whether L3/L4 rx checksum done by NIC or not, * That can be found from rte_eth_rxmode.hw_ip_checksum flag */ - pkt_flags = (uint16_t) (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0; + pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? 
PKT_RX_VLAN_PKT : 0; #ifdef RTE_LIBRTE_IEEE1588 if (rx_status & IXGBE_RXD_STAT_TMST) - pkt_flags = (pkt_flags | PKT_RX_IEEE1588_TMST); + pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; #endif return pkt_flags; } -static inline uint16_t +static inline uint64_t rx_desc_error_to_pkt_flags(uint32_t rx_status) { /* * Bit 31: IPE, IPv4 checksum error * Bit 30: L4I, L4I integrity error */ - static uint16_t error_to_pkt_flags_map[4] = { + static uint64_t error_to_pkt_flags_map[4] = { 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD }; @@ -1019,7 +939,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC /* * LOOK_AHEAD defines how many desc statuses to check beyond the - * current descriptor. + * current descriptor. * It must be a pound define for optimal performance. * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring * function only works with LOOK_AHEAD=8. @@ -1035,6 +955,7 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq) struct igb_rx_entry *rxep; struct rte_mbuf *mb; uint16_t pkt_len; + uint64_t pkt_flags; int s[LOOK_AHEAD], nb_dd; int i, j, nb_rx = 0; @@ -1058,29 +979,38 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq) for (j = LOOK_AHEAD-1; j >= 0; --j) s[j] = rxdp[j].wb.upper.status_error; - /* Clear everything but the status bits (LSB) */ + /* Compute how many status bits were set */ + nb_dd = 0; for (j = 0; j < LOOK_AHEAD; ++j) - s[j] &= IXGBE_RXDADV_STAT_DD; + nb_dd += s[j] & IXGBE_RXDADV_STAT_DD; - /* Compute how many status bits were set */ - nb_dd = s[0]+s[1]+s[2]+s[3]+s[4]+s[5]+s[6]+s[7]; nb_rx += nb_dd; /* Translate descriptor info to mbuf format */ for (j = 0; j < nb_dd; ++j) { mb = rxep[j].mbuf; - pkt_len = rxdp[j].wb.upper.length - rxq->crc_len; - mb->pkt.data_len = pkt_len; - mb->pkt.pkt_len = pkt_len; - mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan; - mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss; + pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len); + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->vlan_tci = rxdp[j].wb.upper.vlan; + mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); /* convert descriptor fields to rte mbuf flags */ - mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags( + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags( rxdp[j].wb.lower.lo_dword.data); /* reuse status field from scan list */ - mb->ol_flags |= rx_desc_status_to_pkt_flags(s[j]); - mb->ol_flags |= rx_desc_error_to_pkt_flags(s[j]); + pkt_flags |= rx_desc_status_to_pkt_flags(s[j]); + pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); + mb->ol_flags = pkt_flags; + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss; + else if (pkt_flags & PKT_RX_FDIR) { + mb->hash.fdir.hash = + (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum) + & IXGBE_ATR_HASH_MASK); + mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id; + } } /* Move mbuf pointers from the S/W ring to the stage */ @@ -1094,8 +1024,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq) } /* clear software ring entries so we can cleanup correctly */ - for (i = 0; i < nb_rx; ++i) + for (i = 0; i < nb_rx; ++i) { rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; + } + return nb_rx; } @@ -1111,7 +1043,8 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq) int diag, i; /* allocate buffers in bulk directly into the S/W ring */ - alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1); + alloc_idx = (uint16_t)(rxq->rx_free_trigger - + (rxq->rx_free_thresh - 1)); rxep = 
&rxq->sw_ring[alloc_idx]; diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, rxq->rx_free_thresh); @@ -1123,11 +1056,10 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq) /* populate the static rte mbuf fields */ mb = rxep[i].mbuf; rte_mbuf_refcnt_set(mb, 1); - mb->type = RTE_MBUF_PKT; - mb->pkt.next = NULL; - mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM; - mb->pkt.nb_segs = 1; - mb->pkt.in_port = rxq->port_id; + mb->next = NULL; + mb->data_off = RTE_PKTMBUF_HEADROOM; + mb->nb_segs = 1; + mb->port = rxq->port_id; /* populate the descriptors */ dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM; @@ -1140,9 +1072,10 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq) IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger); /* update state of internal queue structure */ - rxq->rx_free_trigger += rxq->rx_free_thresh; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger + + rxq->rx_free_thresh); if (rxq->rx_free_trigger >= rxq->nb_rx_desc) - rxq->rx_free_trigger = (rxq->rx_free_thresh - 1); + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); /* no errors */ return 0; @@ -1156,15 +1089,15 @@ ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts, int i; /* how many packets are ready to return? */ - nb_pkts = RTE_MIN(nb_pkts, rxq->rx_nb_avail); + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); /* copy mbuf pointers to the application's packet list */ for (i = 0; i < nb_pkts; ++i) rx_pkts[i] = stage[i]; /* update internal queue state */ - rxq->rx_nb_avail -= nb_pkts; - rxq->rx_next_avail += nb_pkts; + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); return nb_pkts; } @@ -1181,19 +1114,19 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); /* Scan the H/W ring for packets to receive */ - nb_rx = ixgbe_rx_scan_hw_ring(rxq); + nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq); /* update internal queue state */ rxq->rx_next_avail = 0; rxq->rx_nb_avail = nb_rx; - rxq->rx_tail += nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); /* if required, allocate new buffers to replenish descriptors */ if (rxq->rx_tail > rxq->rx_free_trigger) { if (ixgbe_rx_alloc_bufs(rxq) != 0) { int i, j; PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " - "queue_id=%u\n", (unsigned) rxq->port_id, + "queue_id=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id); rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += @@ -1204,7 +1137,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * allocate new buffers to replenish the old ones. 
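[Editor's sketch, not part of the patch.] ixgbe_rx_alloc_bufs() above replenishes the ring in rx_free_thresh-sized batches: one bulk grab from the mempool, field initialization, descriptor writes, then the free trigger advances and wraps. A reduced sketch of that replenish pattern with hypothetical types; it assumes, as the driver guarantees by making the ring size a multiple of the threshold, that a batch never wraps past the end of the ring:

    #include <stdint.h>

    struct desc  { uint64_t pkt_addr; uint64_t hdr_addr; };
    struct entry { void *buf; };

    int pool_get_bulk(void **bufs, unsigned n);   /* assumed mempool op */
    uint64_t buf_dma_addr(void *buf);             /* assumed helper     */

    struct rxq_like {
            struct desc  *ring;
            struct entry *sw_ring;
            uint16_t nb_desc;
            uint16_t free_thresh;
            uint16_t free_trigger;  /* index that fires the next refill */
    };

    static int
    rx_refill(struct rxq_like *q)
    {
            /* batch starts free_thresh-1 slots before the trigger */
            uint16_t idx = (uint16_t)(q->free_trigger -
                                      (q->free_thresh - 1));
            void *bufs[64];          /* assumes free_thresh <= 64 */
            uint16_t i;

            if (pool_get_bulk(bufs, q->free_thresh) != 0)
                    return -1;       /* allocation failed: retry later */

            for (i = 0; i < q->free_thresh; i++) {
                    q->sw_ring[idx + i].buf = bufs[i];
                    q->ring[idx + i].hdr_addr = buf_dma_addr(bufs[i]);
                    q->ring[idx + i].pkt_addr = buf_dma_addr(bufs[i]);
            }

            /* advance (and wrap) the trigger, as the driver does */
            q->free_trigger = (uint16_t)(q->free_trigger + q->free_thresh);
            if (q->free_trigger >= q->nb_desc)
                    q->free_trigger = (uint16_t)(q->free_thresh - 1);
            return 0;
    }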
*/ rxq->rx_nb_avail = 0; - rxq->rx_tail -= nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j) rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; @@ -1239,10 +1172,10 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, nb_rx = 0; while (nb_pkts) { uint16_t ret, n; - n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST); + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST); ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); - nb_rx += ret; - nb_pkts -= ret; + nb_rx = (uint16_t)(nb_rx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); if (ret < n) break; } @@ -1270,7 +1203,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t rx_id; uint16_t nb_rx; uint16_t nb_hold; - uint16_t pkt_flags; + uint64_t pkt_flags; nb_rx = 0; nb_hold = 0; @@ -1320,7 +1253,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * frames to its peer(s). */ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " - "ext_err_stat=0x%08x pkt_len=%u\n", + "ext_err_stat=0x%08x pkt_len=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id, (unsigned) rx_id, (unsigned) staterr, (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); @@ -1328,7 +1261,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nmb = rte_rxmbuf_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " - "queue_id=%u\n", (unsigned) rxq->port_id, + "queue_id=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id); rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; break; @@ -1375,31 +1308,30 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len); - rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM; - rte_packet_prefetch(rxm->pkt.data); - rxm->pkt.nb_segs = 1; - rxm->pkt.next = NULL; - rxm->pkt.pkt_len = pkt_len; - rxm->pkt.data_len = pkt_len; - rxm->pkt.in_port = rxq->port_id; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->pkt.vlan_macip.f.vlan_tci = - rte_le_to_cpu_16(rxd.wb.upper.vlan); + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); - pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr)); - pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr)); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); rxm->ol_flags = pkt_flags; if (likely(pkt_flags & PKT_RX_RSS_HASH)) - rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss; + rxm->hash.rss = rxd.wb.lower.hi_dword.rss; else if (pkt_flags & PKT_RX_FDIR) { - rxm->pkt.hash.fdir.hash = + rxm->hash.fdir.hash = (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum) & IXGBE_ATR_HASH_MASK); - rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id; + rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id; } /* * Store the mbuf address into the next entry of the array @@ -1421,7 +1353,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); if (nb_hold > rxq->rx_free_thresh) { PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " - "nb_hold=%u nb_rx=%u\n", + "nb_hold=%u nb_rx=%u", 
(unsigned) rxq->port_id, (unsigned) rxq->queue_id, (unsigned) rx_id, (unsigned) nb_hold, (unsigned) nb_rx); @@ -1455,7 +1387,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_rx; uint16_t nb_hold; uint16_t data_len; - uint16_t pkt_flags; + uint64_t pkt_flags; nb_rx = 0; nb_hold = 0; @@ -1508,8 +1440,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * to happen by sending specific "back-pressure" flow control * frames to its peer(s). */ - PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u " - "staterr=0x%x data_len=%u\n", + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x data_len=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id, (unsigned) rx_id, (unsigned) staterr, (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); @@ -1517,7 +1449,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nmb = rte_rxmbuf_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " - "queue_id=%u\n", (unsigned) rxq->port_id, + "queue_id=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id); rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; break; @@ -1556,8 +1488,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * Set data length & data buffer address of mbuf. */ data_len = rte_le_to_cpu_16(rxd.wb.upper.length); - rxm->pkt.data_len = data_len; - rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM; + rxm->data_len = data_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; /* * If this is the first buffer of the received packet, @@ -1569,13 +1501,13 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ if (first_seg == NULL) { first_seg = rxm; - first_seg->pkt.pkt_len = data_len; - first_seg->pkt.nb_segs = 1; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; } else { - first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len + first_seg->pkt_len = (uint16_t)(first_seg->pkt_len + data_len); - first_seg->pkt.nb_segs++; - last_seg->pkt.next = rxm; + first_seg->nb_segs++; + last_seg->next = rxm; } /* @@ -1598,18 +1530,18 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * mbuf, subtract the length of that CRC part from the * data length of the previous mbuf. */ - rxm->pkt.next = NULL; + rxm->next = NULL; if (unlikely(rxq->crc_len > 0)) { - first_seg->pkt.pkt_len -= ETHER_CRC_LEN; + first_seg->pkt_len -= ETHER_CRC_LEN; if (data_len <= ETHER_CRC_LEN) { rte_pktmbuf_free_seg(rxm); - first_seg->pkt.nb_segs--; - last_seg->pkt.data_len = (uint16_t) - (last_seg->pkt.data_len - + first_seg->nb_segs--; + last_seg->data_len = (uint16_t) + (last_seg->data_len - (ETHER_CRC_LEN - data_len)); - last_seg->pkt.next = NULL; + last_seg->next = NULL; } else - rxm->pkt.data_len = + rxm->data_len = (uint16_t) (data_len - ETHER_CRC_LEN); } @@ -1622,34 +1554,34 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * - VLAN TCI, if any, * - error flags. */ - first_seg->pkt.in_port = rxq->port_id; + first_seg->port = rxq->port_id; /* * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is * set in the pkt_flags field. 
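[Editor's sketch, not part of the patch.] The CRC handling above is the subtlest part of the scattered receive path: the 4-byte Ethernet CRC can straddle mbuf boundaries, so when the final segment holds nothing but (part of) the CRC it is freed outright and the remainder is trimmed from the previous segment. A sketch of that rule with a hypothetical segment type:

    #include <stdint.h>
    #include <stdlib.h>

    #define CRC_LEN 4

    struct seg {
            struct seg *next;
            uint16_t    data_len;
    };

    /* Trim the CRC from a chain: 'rxm' is the segment just appended,
     * 'last' is the segment before it. Updates the chain's total
     * length and segment count; returns the (possibly new) tail. */
    static struct seg *
    trim_crc(struct seg *last, struct seg *rxm,
             uint16_t *pkt_len, uint8_t *nb_segs)
    {
            *pkt_len = (uint16_t)(*pkt_len - CRC_LEN);
            if (rxm->data_len <= CRC_LEN) {
                    /* last segment is pure CRC: drop it, then trim the
                     * spill-over from the previous segment */
                    uint16_t spill = (uint16_t)(CRC_LEN - rxm->data_len);
                    free(rxm);
                    (*nb_segs)--;
                    last->data_len = (uint16_t)(last->data_len - spill);
                    last->next = NULL;
                    return last;
            }
            rxm->data_len = (uint16_t)(rxm->data_len - CRC_LEN);
            return rxm;
    }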
*/ - first_seg->pkt.vlan_macip.f.vlan_tci = - rte_le_to_cpu_16(rxd.wb.upper.vlan); + first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); pkt_flags = (pkt_flags | - rx_desc_status_to_pkt_flags(staterr)); + rx_desc_status_to_pkt_flags(staterr)); pkt_flags = (pkt_flags | - rx_desc_error_to_pkt_flags(staterr)); + rx_desc_error_to_pkt_flags(staterr)); first_seg->ol_flags = pkt_flags; if (likely(pkt_flags & PKT_RX_RSS_HASH)) - first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss; + first_seg->hash.rss = rxd.wb.lower.hi_dword.rss; else if (pkt_flags & PKT_RX_FDIR) { - first_seg->pkt.hash.fdir.hash = + first_seg->hash.fdir.hash = (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum) & IXGBE_ATR_HASH_MASK); - first_seg->pkt.hash.fdir.id = + first_seg->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id; } /* Prefetch data of first segment, if configured to do so. */ - rte_packet_prefetch(first_seg->pkt.data); + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); /* * Store the mbuf address into the next entry of the array @@ -1686,7 +1618,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); if (nb_hold > rxq->rx_free_thresh) { PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " - "nb_hold=%u nb_rx=%u\n", + "nb_hold=%u nb_rx=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id, (unsigned) rx_id, (unsigned) nb_hold, (unsigned) nb_rx); @@ -1721,7 +1653,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * descriptors should meet the following condition: * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0 */ -#define IXGBE_MIN_RING_DESC 64 +#define IXGBE_MIN_RING_DESC 32 #define IXGBE_MAX_RING_DESC 4096 /* @@ -1736,7 +1668,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, char z_name[RTE_MEMZONE_NAMESIZE]; const struct rte_memzone *mz; - rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id); @@ -1744,8 +1676,13 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, if (mz) return mz; - return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size, - socket_id, 0, IXGBE_ALIGN); +#ifdef RTE_LIBRTE_XEN_DOM0 + return rte_memzone_reserve_bounded(z_name, ring_size, + socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M); +#else + return rte_memzone_reserve_aligned(z_name, ring_size, + socket_id, 0, IXGBE_ALIGN); +#endif } static void @@ -1764,11 +1701,19 @@ ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq) } static void -ixgbe_tx_queue_release(struct igb_tx_queue *txq) +ixgbe_tx_free_swring(struct igb_tx_queue *txq) { - if (txq != NULL) { - ixgbe_tx_queue_release_mbufs(txq); + if (txq != NULL && + txq->sw_ring != NULL) rte_free(txq->sw_ring); +} + +static void +ixgbe_tx_queue_release(struct igb_tx_queue *txq) +{ + if (txq != NULL && txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); rte_free(txq); } } @@ -1783,12 +1728,14 @@ ixgbe_dev_tx_queue_release(void *txq) static void ixgbe_reset_tx_queue(struct igb_tx_queue *txq) { + static const union ixgbe_adv_tx_desc zeroed_desc = { .read = { + .buffer_addr = 0}}; struct igb_tx_entry *txe = txq->sw_ring; uint16_t prev, i; /* Zero out HW ring memory */ - for (i = 0; i < sizeof(union ixgbe_adv_tx_desc) * txq->nb_tx_desc; i++) { - ((volatile 
char *)txq->tx_ring)[i] = 0; + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i] = zeroed_desc; } /* Initialize SW ring entries */ @@ -1802,8 +1749,8 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq) prev = i; } - txq->tx_next_dd = txq->tx_rs_thresh - 1; - txq->tx_next_rs = txq->tx_rs_thresh - 1; + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); txq->tx_tail = 0; txq->nb_tx_used = 0; @@ -1818,6 +1765,12 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq) IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); } +static struct ixgbe_txq_ops def_txq_ops = { + .release_mbufs = ixgbe_tx_queue_release_mbufs, + .free_swring = ixgbe_tx_free_swring, + .reset = ixgbe_reset_tx_queue, +}; + int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1864,43 +1817,41 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * H/W race condition, hence the maximum threshold constraints. * When set to zero use default values. */ - tx_rs_thresh = (tx_conf->tx_rs_thresh) ? - tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH; - tx_free_thresh = (tx_conf->tx_free_thresh) ? - tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH; + tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? + tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); if (tx_rs_thresh >= (nb_desc - 2)) { - RTE_LOG(ERR, PMD, - "tx_rs_thresh must be less than the " - "number of TX descriptors minus 2. " - "(tx_rs_thresh=%u port=%d queue=%d)\n", - tx_rs_thresh, dev->data->port_id, queue_idx); + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number " + "of TX descriptors minus 2. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); return -(EINVAL); } if (tx_free_thresh >= (nb_desc - 3)) { - RTE_LOG(ERR, PMD, - "tx_rs_thresh must be less than the " - "tx_free_thresh must be less than the " - "number of TX descriptors minus 3. " - "(tx_free_thresh=%u port=%d queue=%d)\n", - tx_free_thresh, dev->data->port_id, queue_idx); + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "tx_free_thresh must be less than the number of " + "TX descriptors minus 3. (tx_free_thresh=%u " + "port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, (int)queue_idx); return -(EINVAL); } if (tx_rs_thresh > tx_free_thresh) { - RTE_LOG(ERR, PMD, - "tx_rs_thresh must be less than or equal to " - "tx_free_thresh. " - "(tx_free_thresh=%u tx_rs_thresh=%u " - "port=%d queue=%d)\n", - tx_free_thresh, tx_rs_thresh, - dev->data->port_id, queue_idx); + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to " + "tx_free_thresh. (tx_free_thresh=%u " + "tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); return -(EINVAL); } if ((nb_desc % tx_rs_thresh) != 0) { - RTE_LOG(ERR, PMD, - "tx_rs_thresh must be a divisor of the" - "number of TX descriptors. " - "(tx_rs_thresh=%u port=%d queue=%d)\n", - tx_rs_thresh, dev->data->port_id, queue_idx); + PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the " + "number of TX descriptors. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); return -(EINVAL); } @@ -1911,22 +1862,22 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * accumulates WTHRESH descriptors. 
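[Editor's sketch, not part of the patch.] The threshold checks above, together with the WTHRESH check just below, pin down the legal TX configuration space. A condensed restatement of the same constraints as one validator (0 means valid):

    #include <stdint.h>

    /* Mirrors the TX threshold constraints enforced above:
     *   tx_rs_thresh   <  nb_desc - 2
     *   tx_free_thresh <  nb_desc - 3
     *   tx_rs_thresh   <= tx_free_thresh
     *   nb_desc % tx_rs_thresh == 0
     *   wthresh == 0 whenever tx_rs_thresh > 1 */
    static int
    check_tx_thresh(uint16_t nb_desc, uint16_t rs_thresh,
                    uint16_t free_thresh, uint8_t wthresh)
    {
            if (rs_thresh == 0 || rs_thresh >= (uint16_t)(nb_desc - 2))
                    return -1;
            if (free_thresh >= (uint16_t)(nb_desc - 3))
                    return -1;
            if (rs_thresh > free_thresh)
                    return -1;
            if ((nb_desc % rs_thresh) != 0)
                    return -1;
            if (rs_thresh > 1 && wthresh != 0)
                    return -1;
            return 0;
    }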
*/ if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) { - RTE_LOG(ERR, PMD, - "TX WTHRESH must be set to 0 if " - "tx_rs_thresh is greater than 1. " - "(tx_rs_thresh=%u port=%d queue=%d)\n", - tx_rs_thresh, - dev->data->port_id, queue_idx); + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); return -(EINVAL); } /* Free memory prior to re-allocation if needed... */ - if (dev->data->tx_queues[queue_idx] != NULL) + if (dev->data->tx_queues[queue_idx] != NULL) { ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } /* First allocate the tx queue data structure */ - txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue), - CACHE_LINE_SIZE); + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); if (txq == NULL) return (-ENOMEM); @@ -1950,8 +1901,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->hthresh = tx_conf->tx_thresh.hthresh; txq->wthresh = tx_conf->tx_thresh.wthresh; txq->queue_id = queue_idx; + txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); txq->port_id = dev->data->port_id; txq->txq_flags = tx_conf->txq_flags; + txq->ops = &def_txq_ops; + txq->tx_deferred_start = tx_conf->tx_deferred_start; /* * Modification to set VFTDT for virtual function if vf is detected @@ -1959,32 +1914,55 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, if (hw->mac.type == ixgbe_mac_82599_vf) txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx)); else - txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(queue_idx)); - + txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx)); +#ifndef RTE_LIBRTE_XEN_DOM0 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr; +#else + txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); +#endif txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr; /* Allocate software ring */ - txq->sw_ring = rte_zmalloc("txq->sw_ring", - sizeof(struct igb_tx_entry) * nb_desc, - CACHE_LINE_SIZE); + txq->sw_ring = rte_zmalloc_socket("txq->sw_ring", + sizeof(struct igb_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); if (txq->sw_ring == NULL) { ixgbe_tx_queue_release(txq); return (-ENOMEM); } - PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); - ixgbe_reset_tx_queue(txq); - - dev->data->tx_queues[queue_idx] = txq; - /* Use a simple Tx queue (no offloads, no multi segs) if possible */ if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) && - (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) - dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; - else + (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { + PMD_INIT_LOG(INFO, "Using simple tx code path"); +#ifdef RTE_IXGBE_INC_VECTOR + if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + ixgbe_txq_vec_setup(txq) == 0) { + PMD_INIT_LOG(INFO, "Vector tx enabled."); + dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; + } + else +#endif + dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; + } else { + PMD_INIT_LOG(INFO, "Using full-featured tx code path"); + PMD_INIT_LOG(INFO, " - txq_flags = %lx " + "[IXGBE_SIMPLE_FLAGS=%lx]", + (long unsigned)txq->txq_flags, + (long unsigned)IXGBE_SIMPLE_FLAGS); + 
PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu " + "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", + (long unsigned)txq->tx_rs_thresh, + (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST); dev->tx_pkt_burst = ixgbe_xmit_pkts; + } + + txq->ops->reset(txq); + + dev->data->tx_queues[queue_idx] = txq; + return (0); } @@ -2039,7 +2017,11 @@ ixgbe_dev_rx_queue_release(void *rxq) * function must be used. */ static inline int +#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq) +#else +check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq) +#endif { int ret = 0; @@ -2053,15 +2035,34 @@ check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq) * outside of this function. */ #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC - if (! (rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) + if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "RTE_PMD_IXGBE_RX_MAX_BURST=%d", + rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST); ret = -EINVAL; - else if (! (rxq->rx_free_thresh < rxq->nb_rx_desc)) + } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "rxq->nb_rx_desc=%d", + rxq->rx_free_thresh, rxq->nb_rx_desc); ret = -EINVAL; - else if (! ((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) + } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); ret = -EINVAL; - else if (! (rxq->nb_rx_desc < - (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) + } else if (!(rxq->nb_rx_desc < + (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "IXGBE_MAX_RING_DESC=%d, " + "RTE_PMD_IXGBE_RX_MAX_BURST=%d", + rxq->nb_rx_desc, IXGBE_MAX_RING_DESC, + RTE_PMD_IXGBE_RX_MAX_BURST); ret = -EINVAL; + } #else ret = -EINVAL; #endif @@ -2073,6 +2074,8 @@ check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq) static void ixgbe_reset_rx_queue(struct igb_rx_queue *rxq) { + static const union ixgbe_adv_rx_desc zeroed_desc = { .read = { + .pkt_addr = 0}}; unsigned i; uint16_t len; @@ -2089,7 +2092,7 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq) #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0) /* zero out extra memory */ - len = rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST; + len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST); else #endif /* do not zero out extra memory */ @@ -2100,8 +2103,8 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq) * the H/W ring so look-ahead logic in Rx Burst bulk alloc function * reads extra memory as zeros. 
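[Editor's sketch, not part of the patch.] The four preconditions logged in check_rx_burst_bulk_alloc_preconditions() above gate the bulk-alloc receive path. A compact restatement, with constants matching RTE_PMD_IXGBE_RX_MAX_BURST = 32 and IXGBE_MAX_RING_DESC = 4096 from this file:

    #include <stdint.h>

    #define RX_MAX_BURST  32
    #define MAX_RING_DESC 4096

    /* Bulk-alloc RX is usable only when all four conditions hold:
     *   rx_free_thresh >= RX_MAX_BURST
     *   rx_free_thresh <  nb_rx_desc
     *   nb_rx_desc % rx_free_thresh == 0
     *   nb_rx_desc < MAX_RING_DESC - RX_MAX_BURST */
    static int
    rx_bulk_alloc_allowed(uint16_t nb_rx_desc, uint16_t rx_free_thresh)
    {
            return rx_free_thresh >= RX_MAX_BURST &&
                   rx_free_thresh <  nb_rx_desc &&
                   (nb_rx_desc % rx_free_thresh) == 0 &&
                   nb_rx_desc < (MAX_RING_DESC - RX_MAX_BURST);
    }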
*/ - for (i = 0; i < len * sizeof(union ixgbe_adv_rx_desc); i++) { - ((volatile char *)rxq->rx_ring)[i] = 0; + for (i = 0; i < len; i++) { + rxq->rx_ring[i] = zeroed_desc; } #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC @@ -2116,7 +2119,7 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq) rxq->rx_nb_avail = 0; rxq->rx_next_avail = 0; - rxq->rx_free_trigger = rxq->rx_free_thresh - 1; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); #endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */ rxq->rx_tail = 0; rxq->nb_rx_hold = 0; @@ -2153,22 +2156,27 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, } /* Free memory prior to re-allocation if needed... */ - if (dev->data->rx_queues[queue_idx] != NULL) + if (dev->data->rx_queues[queue_idx] != NULL) { ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } /* First allocate the rx queue data structure */ - rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue), - CACHE_LINE_SIZE); + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); if (rxq == NULL) return (-ENOMEM); rxq->mb_pool = mp; rxq->nb_rx_desc = nb_desc; rxq->rx_free_thresh = rx_conf->rx_free_thresh; rxq->queue_id = queue_idx; + rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : - ETHER_CRC_LEN); + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? + 0 : ETHER_CRC_LEN); rxq->drop_en = rx_conf->rx_drop_en; + rxq->rx_deferred_start = rx_conf->rx_deferred_start; /* * Allocate RX ring hardware descriptors. A memzone large enough to @@ -2176,63 +2184,89 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, * resizing in later calls to the queue setup function. */ rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, - IXGBE_MAX_RING_DESC * sizeof(union ixgbe_adv_rx_desc), - socket_id); + RX_RING_SZ, socket_id); if (rz == NULL) { ixgbe_rx_queue_release(rxq); return (-ENOMEM); } + /* - * Modified to setup VFRDT for Virtual Function + * Zero init all the descriptors in the ring. */ - if (hw->mac.type == ixgbe_mac_82599_vf) - rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx)); - else - rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(queue_idx)); + memset (rz->addr, 0, RX_RING_SZ); + /* + * Modified to setup VFRDT for Virtual Function + */ + if (hw->mac.type == ixgbe_mac_82599_vf) { + rxq->rdt_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx)); + rxq->rdh_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx)); + } + else { + rxq->rdt_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx)); + rxq->rdh_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx)); + } +#ifndef RTE_LIBRTE_XEN_DOM0 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr; +#else + rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); +#endif rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr; /* - * Allocate software ring. Allow for space at the end of the + * Allocate software ring. Allow for space at the end of the * S/W ring to make sure look-ahead logic in bulk alloc Rx burst * function does not access an invalid memory region. 
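[Editor's sketch, not part of the patch.] Both queue setup paths in this patch introduce a reg_idx that maps the software queue index onto a hardware ring register index: with SR-IOV inactive the two are identical, otherwise PF queues start at the default pool's first queue. A one-line sketch of that mapping:

    #include <stdint.h>

    /* reg_idx selects which hardware ring registers (RDT/TDT/RDH, ...)
     * back this software queue; def_pool_q_idx is the first queue of
     * the PF's default pool when SR-IOV is active. */
    static uint16_t
    hw_reg_idx(int sriov_active, uint16_t def_pool_q_idx,
               uint16_t queue_idx)
    {
            return sriov_active ? (uint16_t)(def_pool_q_idx + queue_idx)
                                : queue_idx;
    }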
*/ #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC - len = nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST; + len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST); #else len = nb_desc; #endif - rxq->sw_ring = rte_zmalloc("rxq->sw_ring", - sizeof(struct igb_rx_entry) * len, - CACHE_LINE_SIZE); + rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring", + sizeof(struct igb_rx_entry) * len, + RTE_CACHE_LINE_SIZE, socket_id); if (rxq->sw_ring == NULL) { ixgbe_rx_queue_release(rxq); return (-ENOMEM); } - PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); /* - * Certain constaints must be met in order to use the bulk buffer + * Certain constraints must be met in order to use the bulk buffer * allocation Rx burst function. */ use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq); +#ifdef RTE_IXGBE_INC_VECTOR + ixgbe_rxq_vec_setup(rxq); +#endif /* Check if pre-conditions are satisfied, and no Scattered Rx */ if (!use_def_burst_func && !dev->data->scattered_rx) { #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " "satisfied. Rx Burst Bulk Alloc function will be " - "used on port=%d, queue=%d.\n", + "used on port=%d, queue=%d.", rxq->port_id, rxq->queue_id); dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc; +#ifdef RTE_IXGBE_INC_VECTOR + if (!ixgbe_rx_vec_condition_check(dev)) { + PMD_INIT_LOG(INFO, "Vector rx enabled, please make " + "sure RX burst size no less than 32."); + dev->rx_pkt_burst = ixgbe_recv_pkts_vec; + } +#endif #endif } else { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions " "are not satisfied, Scattered Rx is requested, " "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not " - "enabled (port=%d, queue=%d).\n", + "enabled (port=%d, queue=%d).", rxq->port_id, rxq->queue_id); } dev->data->rx_queues[queue_idx] = rxq; @@ -2242,6 +2276,51 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, return 0; } +uint32_t +ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define IXGBE_RXQ_SCAN_INTERVAL 4 + volatile union ixgbe_adv_rx_desc *rxdp; + struct igb_rx_queue *rxq; + uint32_t desc = 0; + + if (rx_queue_id >= dev->data->nb_rx_queues) { + PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id); + return 0; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + + while ((desc < rxq->nb_rx_desc) && + (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) { + desc += IXGBE_RXQ_SCAN_INTERVAL; + rxdp += IXGBE_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct igb_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return 0; + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD); +} + void ixgbe_dev_clear_queues(struct rte_eth_dev *dev) { @@ -2252,8 +2331,8 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { struct igb_tx_queue *txq = dev->data->tx_queues[i]; if (txq != NULL) { - ixgbe_tx_queue_release_mbufs(txq); - ixgbe_reset_tx_queue(txq); + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); } } @@ -2315,49 +2394,29 
@@ ixgbe_rss_disable(struct rte_eth_dev *dev) } static void -ixgbe_rss_configure(struct rte_eth_dev *dev) +ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf) { - struct ixgbe_hw *hw; - uint8_t *hash_key; - uint32_t rss_key; + uint8_t *hash_key; uint32_t mrqc; - uint32_t reta; - uint16_t rss_hf; + uint32_t rss_key; + uint64_t rss_hf; uint16_t i; - uint16_t j; - - PMD_INIT_FUNC_TRACE(); - hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; - if (rss_hf == 0) { /* Disable RSS */ - ixgbe_rss_disable(dev); - return; - } - hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; - if (hash_key == NULL) - hash_key = rss_intel_key; /* Default hash key */ - - /* Fill in RSS hash key */ - for (i = 0; i < 10; i++) { - rss_key = hash_key[(i * 4)]; - rss_key |= hash_key[(i * 4) + 1] << 8; - rss_key |= hash_key[(i * 4) + 2] << 16; - rss_key |= hash_key[(i * 4) + 3] << 24; - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key); - } - /* Fill in redirection table */ - reta = 0; - for (i = 0, j = 0; i < 128; i++, j++) { - if (j == dev->data->nb_rx_queues) j = 0; - reta = (reta << 8) | j; - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), rte_bswap32(reta)); + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Fill in RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = hash_key[(i * 4)]; + rss_key |= hash_key[(i * 4) + 1] << 8; + rss_key |= hash_key[(i * 4) + 2] << 16; + rss_key |= hash_key[(i * 4) + 3] << 24; + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key); + } } - /* Set configured hashing functions in MRQC register */ - mrqc = IXGBE_MRQC_RSSEN; /* RSS enable */ + /* Set configured hashing protocols in MRQC register */ + rss_hf = rss_conf->rss_hf; + mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */ if (rss_hf & ETH_RSS_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; if (rss_hf & ETH_RSS_IPV4_TCP) @@ -2379,6 +2438,133 @@ ixgbe_rss_configure(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } +int +ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + uint64_t rss_hf; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS): + * "RSS enabling cannot be done dynamically while it must be + * preceded by a software reset" + * Before changing anything, first check that the update RSS operation + * does not attempt to disable RSS, if RSS was enabled at + * initialization time, or does not attempt to enable RSS, if RSS was + * disabled at initialization time. 
+ */ + rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL; + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -(EINVAL); + return 0; /* Nothing to do */ + } + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -(EINVAL); + ixgbe_hw_rss_hash_set(hw, rss_conf); + return 0; +} + +int +ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ixgbe_hw *hw; + uint8_t *hash_key; + uint32_t mrqc; + uint32_t rss_key; + uint64_t rss_hf; + uint16_t i; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Return RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i); + hash_key[(i * 4)] = rss_key & 0x000000FF; + hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF; + hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF; + hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF; + } + } + + /* Get RSS functions configured in MRQC register */ + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */ + rss_conf->rss_hf = 0; + return 0; + } + rss_hf = 0; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP) + rss_hf |= ETH_RSS_IPV4_TCP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX) + rss_hf |= ETH_RSS_IPV6_EX; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP) + rss_hf |= ETH_RSS_IPV6_TCP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP) + rss_hf |= ETH_RSS_IPV6_TCP_EX; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP) + rss_hf |= ETH_RSS_IPV4_UDP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP) + rss_hf |= ETH_RSS_IPV6_UDP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP) + rss_hf |= ETH_RSS_IPV6_UDP_EX; + rss_conf->rss_hf = rss_hf; + return 0; +} + +static void +ixgbe_rss_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_rss_conf rss_conf; + struct ixgbe_hw *hw; + uint32_t reta; + uint16_t i; + uint16_t j; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + reta = 0; + for (i = 0, j = 0; i < 128; i++, j++) { + if (j == dev->data->nb_rx_queues) + j = 0; + reta = (reta << 8) | j; + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), + rte_bswap32(reta)); + } + + /* + * Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. 
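ixgbe_dev_rss_hash_conf_get() above simply reverses the byte packing that ixgbe_hw_rss_hash_set() applies when loading the ten 32-bit RSSRK registers. A sketch of that round trip over plain arrays (no hardware access; a 40-byte key is assumed, since the hardware holds 10 x 32 bits):

#include <stdint.h>

/* Pack a 40-byte RSS key into ten 32-bit words, byte 0 in the
 * least-significant byte, as the RSSRK writes above do. */
static void
rss_key_pack(const uint8_t key[40], uint32_t words[10])
{
	int i;

	for (i = 0; i < 10; i++)
		words[i] = (uint32_t)key[4 * i] |
			((uint32_t)key[4 * i + 1] << 8) |
			((uint32_t)key[4 * i + 2] << 16) |
			((uint32_t)key[4 * i + 3] << 24);
}

/* Recover the key bytes from the packed words, as the readback does. */
static void
rss_key_unpack(const uint32_t words[10], uint8_t key[40])
{
	int i;

	for (i = 0; i < 10; i++) {
		key[4 * i] = words[i] & 0xFF;
		key[4 * i + 1] = (words[i] >> 8) & 0xFF;
		key[4 * i + 2] = (words[i] >> 16) & 0xFF;
		key[4 * i + 3] = (words[i] >> 24) & 0xFF;
	}
}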
+ */ + rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) { + ixgbe_rss_disable(dev); + return; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + ixgbe_hw_rss_hash_set(hw, &rss_conf); +} + #define NUM_VFTA_REGISTERS 128 #define NIC_RX_BUFFER_SIZE 0x200 @@ -2437,6 +2623,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) } else { vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL; } + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */ @@ -2493,13 +2680,13 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure */ -static void +static void ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { uint32_t reg; uint32_t q; - + PMD_INIT_FUNC_TRACE(); if (hw->mac.type != ixgbe_mac_82598EB) { /* Disable the Tx desc arbiter so that MTQC can be changed */ @@ -2547,21 +2734,21 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev, { struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; - struct ixgbe_hw *hw = + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - + PMD_INIT_FUNC_TRACE(); - if (hw->mac.type != ixgbe_mac_82598EB) + if (hw->mac.type != ixgbe_mac_82598EB) /*PF VF Transmit Enable*/ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); - + /*Configure general DCB TX parameters*/ ixgbe_dcb_tx_hw_config(hw,dcb_config); return; } -static void +static void ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, struct ixgbe_dcb_config *dcb_config) { @@ -2583,19 +2770,20 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_rx_conf->dcb_queue[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j); + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); } } -static void +static void ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, struct ixgbe_dcb_config *dcb_config) -{ +{ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; struct ixgbe_dcb_tc_config *tc; uint8_t i,j; - + /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) { dcb_config->num_tcs.pg_tcs = ETH_8_TCS; @@ -2610,46 +2798,51 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_tx_conf->dcb_queue[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); } return; } -static void -ixgbe_dcb_rx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config) +static void +ixgbe_dcb_rx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) { struct rte_eth_dcb_rx_conf *rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; struct ixgbe_dcb_tc_config *tc; uint8_t i,j; - dcb_config->num_tcs.pg_tcs = rx_conf->nb_tcs; - dcb_config->num_tcs.pfc_tcs = rx_conf->nb_tcs; - - /* User Priority to Traffic Class mapping */ + dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs; + dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; + + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = rx_conf->dcb_queue[i]; tc = 
&dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j); + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); } } -static void -ixgbe_dcb_tx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config) +static void +ixgbe_dcb_tx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) { struct rte_eth_dcb_tx_conf *tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; struct ixgbe_dcb_tc_config *tc; uint8_t i,j; - dcb_config->num_tcs.pg_tcs = tx_conf->nb_tcs; - dcb_config->num_tcs.pfc_tcs = tx_conf->nb_tcs; - - /* User Priority to Traffic Class mapping */ + dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; + dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; + + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = tx_conf->dcb_queue[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); } } @@ -2704,7 +2897,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); - + /* VFTA - enable all vlan filters */ for (i = 0; i < NUM_VFTA_REGISTERS; i++) { IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); @@ -2716,11 +2909,11 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, */ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - + return; } -static void +static void ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) { @@ -2730,6 +2923,8 @@ ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, map); break; @@ -2738,7 +2933,7 @@ ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill, } } -static void +static void ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) { @@ -2749,6 +2944,8 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map); break; @@ -2761,7 +2958,7 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m #define DCB_TX_CONFIG 1 #define DCB_TX_PB 1024 /** - * ixgbe_dcb_hw_configure - Enable DCB and configure + * ixgbe_dcb_hw_configure - Enable DCB and configure * general DCB in VT mode and non-VT mode parameters * @dev: pointer to rte_eth_dev structure * @dcb_config: pointer to ixgbe_dcb_config structure @@ -2781,17 +2978,17 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; struct ixgbe_dcb_tc_config *tc; - uint32_t max_frame = dev->data->max_frame_size; - struct ixgbe_hw *hw = + uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); switch(dev->data->dev_conf.rxmode.mq_mode){ - case ETH_VMDQ_DCB: + case ETH_MQ_RX_VMDQ_DCB: dcb_config->vt_mode = true; if (hw->mac.type 
!= ixgbe_mac_82598EB) { config_dcb_rx = DCB_RX_CONFIG; /* - *get dcb and VT rx configuration parameters + *get dcb and VT rx configuration parameters *from rte_eth_conf */ ixgbe_vmdq_dcb_rx_config(dev,dcb_config); @@ -2799,7 +2996,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_vmdq_dcb_configure(dev); } break; - case ETH_DCB_RX: + case ETH_MQ_RX_DCB: dcb_config->vt_mode = false; config_dcb_rx = DCB_RX_CONFIG; /* Get dcb TX configuration parameters from rte_eth_conf */ @@ -2808,11 +3005,11 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_rx_hw_config(hw, dcb_config); break; default: - PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n"); + PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration"); break; } switch (dev->data->dev_conf.txmode.mq_mode) { - case ETH_VMDQ_DCB_TX: + case ETH_MQ_TX_VMDQ_DCB: dcb_config->vt_mode = true; config_dcb_tx = DCB_TX_CONFIG; /* get DCB and VT TX configuration parameters from rte_eth_conf */ @@ -2821,16 +3018,16 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config); break; - case ETH_DCB_TX: + case ETH_MQ_TX_DCB: dcb_config->vt_mode = false; - config_dcb_tx = DCB_RX_CONFIG; + config_dcb_tx = DCB_TX_CONFIG; /*get DCB TX configuration parameters from rte_eth_conf*/ ixgbe_dcb_tx_config(dev,dcb_config); /*Configure general DCB TX parameters*/ ixgbe_dcb_tx_hw_config(hw, dcb_config); break; default: - PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n"); + PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration"); break; } @@ -2841,8 +3038,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Avoid un-configured priority mapping to TC0 */ uint8_t j = 4; uint8_t mask = 0xFF; - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) - mask &= ~ (1 << map[i]); + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) + mask = (uint8_t)(mask & (~ (1 << map[i]))); for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) { if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES)) map[j++] = i; @@ -2851,8 +3048,10 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Re-configure 4 TCs BW */ for (i = 0; i < nb_tcs; i++) { tc = &dcb_config->tc_config[i]; - tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / nb_tcs; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / nb_tcs; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs); + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs); } for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { tc = &dcb_config->tc_config[i]; @@ -2947,16 +3146,149 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, void ixgbe_configure_dcb(struct rte_eth_dev *dev) { struct ixgbe_dcb_config *dcb_cfg = - IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); - - PMD_INIT_FUNC_TRACE(); + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &(dev->data->dev_conf); + + PMD_INIT_FUNC_TRACE(); + + /* check that the configured mq_mode supports DCB */ + if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) && + (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB)) + return; + + if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES) + return; + /** Configure DCB hardware **/ - if(((dev->data->dev_conf.rxmode.mq_mode != ETH_RSS) && - (dev->data->nb_rx_queues == ETH_DCB_NUM_QUEUES))|| - ((dev->data->dev_conf.txmode.mq_mode != ETH_DCB_NONE) && - (dev->data->nb_tx_queues == ETH_DCB_NUM_QUEUES))) { - ixgbe_dcb_hw_configure(dev,dcb_cfg); + ixgbe_dcb_hw_configure(dev,dcb_cfg); + + return; +} + +/* + * VMDq is only supported on 10 GbE NICs.
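The "Re-configure 4 TCs BW" loop above splits link bandwidth with integer division, so the per-TC percentages need not sum to 100 (8 TCs get 12% each, leaving 4% unassigned). A tiny sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int nb_tcs;

	/* floor(100 / nb_tcs): 4 TCs -> 25% each, 8 TCs -> 12% each */
	for (nb_tcs = 4; nb_tcs <= 8; nb_tcs += 4)
		printf("%d TCs -> %d%% per TC (remainder %d%%)\n",
		       nb_tcs, 100 / nb_tcs, 100 % nb_tcs);
	return 0;
}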
+ */ +static void +ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_rx_conf *cfg; + struct ixgbe_hw *hw; + enum rte_eth_nb_pools num_pools; + uint32_t mrqc, vt_ctl, vlanctrl; + uint32_t vmolr = 0; + int i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + num_pools = cfg->nb_queue_pools; + + ixgbe_rss_disable(dev); + + /* MRQC: enable vmdq */ + mrqc = IXGBE_MRQC_VMDQEN; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* PFVTCTL: turn on virtualisation and set the default pool */ + vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; + if (cfg->enable_default_pool) + vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT); + else + vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL; + + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); + + for (i = 0; i < (int)num_pools; i++) { + vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); } + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX); + + /* VFRE: pool enabling for receive - 64 */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX); + if (num_pools == ETH_64_POOLS) + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX); + + /* + * MPSAR - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX); + + /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK))); + /* + * Put the allowed pools in VFB reg. As we only have 16 or 64 + * pools, we only need to use the first half of the register + * i.e. 
bits 0-31 + */ + if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0) + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \ + (cfg->pool_map[i].pools & UINT32_MAX)); + else + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \ + ((cfg->pool_map[i].pools >> 32) \ + & UINT32_MAX)); + + } + + /* PFDMA Tx General Switch Control Enables VMDQ loopback */ + if (cfg->enable_loop_back) { + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++) + IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/* + * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters + * @hw: pointer to hardware structure + */ +static void +ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) +{ + uint32_t reg; + uint32_t q; + + PMD_INIT_FUNC_TRACE(); + /*PF VF Transmit Enable*/ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX); + + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Disable drop for all queues */ + for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + IXGBE_WRITE_FLUSH(hw); + return; } @@ -2972,17 +3304,16 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) volatile union ixgbe_adv_rx_desc *rxd; struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); if (mbuf == NULL) { - PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n", + PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u", (unsigned) rxq->queue_id); return (-ENOMEM); } rte_mbuf_refcnt_set(mbuf, 1); - mbuf->type = RTE_MBUF_PKT; - mbuf->pkt.next = NULL; - mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM; - mbuf->pkt.nb_segs = 1; - mbuf->pkt.in_port = rxq->port_id; + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf)); @@ -2995,6 +3326,120 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) return 0; } +static int +ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + * any DCB/RSS w/o VMDq multi-queue setting + */ + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + ixgbe_rss_configure(dev); + break; + + case ETH_MQ_RX_VMDQ_DCB: + ixgbe_vmdq_dcb_configure(dev); + break; + + case ETH_MQ_RX_VMDQ_ONLY: + ixgbe_vmdq_rx_hw_configure(dev); + break; + + case ETH_MQ_RX_NONE: + /* if mq_mode is none, disable rss mode.*/ + default: ixgbe_rss_disable(dev); + } + } else { + switch (RTE_ETH_DEV_SRIOV(dev).active) { + /* + * SRIOV active scheme + * FIXME if support DCB/RSS together with VMDq & SRIOV + */ + case ETH_64_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN); + break; + + case ETH_32_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN); + break; + + case ETH_16_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN); + break; + default: + PMD_INIT_LOG(ERR, "invalid pool number in 
IOV mode"); + } + } + + return 0; +} + +static int +ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mtqc; + uint32_t rttdcs; + + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + /* disable arbiter before setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + * any DCB w/o VMDq multi-queue setting + */ + if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY) + ixgbe_vmdq_tx_hw_configure(hw); + else { + mtqc = IXGBE_MTQC_64Q_1PB; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + } + } else { + switch (RTE_ETH_DEV_SRIOV(dev).active) { + + /* + * SRIOV active scheme + * FIXME if support DCB together with VMDq & SRIOV + */ + case ETH_64_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF; + break; + case ETH_32_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF; + break; + case ETH_16_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA | + IXGBE_MTQC_8TC_8TQ; + break; + default: + mtqc = IXGBE_MTQC_64Q_1PB; + PMD_INIT_LOG(ERR, "invalid pool number in IOV mode"); + } + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + } + + /* re-enable arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + return 0; +} + /* * Initializes Receive Unit. */ @@ -3014,7 +3459,6 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) uint32_t rxcsum; uint16_t buf_size; uint16_t i; - int ret; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3054,17 +3498,21 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) } else hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + /* + * If loopback mode is configured for 82599, set LPBK bit. + */ + if (hw->mac.type == ixgbe_mac_82599EB && + dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) + hlreg0 |= IXGBE_HLREG0_LPBK; + else + hlreg0 &= ~IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; - /* Allocate buffers for descriptor rings */ - ret = ixgbe_alloc_rx_queue_mbufs(rxq); - if (ret) - return ret; - /* * Reset crc_len in case it was changed after queue setup by a * call to configure. 
@@ -3075,14 +3523,14 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) /* Setup the Base and Length of the Rx Descriptor Rings */ bus_addr = rxq->rx_ring_phys_addr; - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx), (uint32_t)(bus_addr & 0x00000000ffffffffULL)); - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx), (uint32_t)(bus_addr >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx), rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc)); - IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0); /* Configure the SRRCTL register */ #ifdef RTE_HEADER_SPLIT_ENABLE @@ -3097,7 +3545,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), psrtype); + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype); } srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & @@ -3117,42 +3565,45 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * The value is in 1 KB resolution. Valid values can be from * 1 KB to 16 KB. */ - mbp_priv = (struct rte_pktmbuf_pool_private *) - ((char *)rxq->mb_pool + sizeof(struct rte_mempool)); + mbp_priv = rte_mempool_get_priv(rxq->mb_pool); buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM); srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & IXGBE_SRRCTL_BSIZEPKT_MASK); - IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl); + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << IXGBE_SRRCTL_BSIZEPKT_SHIFT); - if (dev->data->dev_conf.rxmode.max_rx_pkt_len + - IXGBE_RX_BUF_THRESHOLD > buf_size){ + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size){ + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->data->scattered_rx = 1; +#ifdef RTE_IXGBE_INC_VECTOR + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; +#else dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; +#endif } } + if (dev->data->dev_conf.rxmode.enable_scatter) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); +#ifdef RTE_IXGBE_INC_VECTOR + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; +#else + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; +#endif + dev->data->scattered_rx = 1; + } + /* - * Configure RSS if device configured with multiple RX queues. + * Device configured with multiple RX queues. */ - if (hw->mac.type == ixgbe_mac_82599EB) { - if (dev->data->nb_rx_queues > 1) - switch (dev->data->dev_conf.rxmode.mq_mode) { - case ETH_RSS: - ixgbe_rss_configure(dev); - break; - - case ETH_VMDQ_DCB: - ixgbe_vmdq_dcb_configure(dev); - break; - - default: ixgbe_rss_disable(dev); - } - else - ixgbe_rss_disable(dev); - } + ixgbe_dev_mq_rx_configure(dev); /* * Setup the Checksum Register. 
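The scatter decision above compares the largest expected frame, padded by two VLAN tags for QinQ, against the per-descriptor buffer size that SRRCTL can express in 1 KB units. A sketch of both computations (a 4-byte VLAN_TAG_SIZE is assumed for IXGBE_VLAN_TAG_SIZE):

#include <stdbool.h>
#include <stdint.h>

#define VLAN_TAG_SIZE 4	/* assumed value of IXGBE_VLAN_TAG_SIZE */

/* SRRCTL holds the buffer size in 1 KB units, so round down to 1 KB. */
static uint16_t
rx_buf_size(uint16_t data_room, uint16_t headroom)
{
	return (uint16_t)(((data_room - headroom) >> 10) << 10);
}

/* Multi-segment receive is needed when a max-size frame plus two VLAN
 * tags cannot fit in one receive buffer. */
static bool
needs_scattered_rx(uint32_t max_rx_pkt_len, uint16_t buf_size)
{
	return max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size;
}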
@@ -3192,15 +3643,15 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev) uint64_t bus_addr; uint32_t hlreg0; uint32_t txctrl; - uint32_t rttdcs; uint16_t i; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* Enable TX CRC (checksum offload requirement) */ + /* Enable TX CRC (checksum offload requirement) and hw padding + * (TSO requirement) */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 |= IXGBE_HLREG0_TXCRCEN; + hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); /* Setup the Base and Length of the Tx Descriptor Rings */ @@ -3208,15 +3659,15 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev) txq = dev->data->tx_queues[i]; bus_addr = txq->tx_ring_phys_addr; - IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx), (uint32_t)(bus_addr & 0x00000000ffffffffULL)); - IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx), (uint32_t)(bus_addr >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx), txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc)); /* Setup the HW Tx Head and TX Tail descriptor pointers */ - IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); /* * Disable Tx Head Writeback RO bit, since this hoses @@ -3225,38 +3676,58 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_82598EB: txctrl = IXGBE_READ_REG(hw, - IXGBE_DCA_TXCTRL(i)); + IXGBE_DCA_TXCTRL(txq->reg_idx)); txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx), txctrl); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: default: txctrl = IXGBE_READ_REG(hw, - IXGBE_DCA_TXCTRL_82599(i)); + IXGBE_DCA_TXCTRL_82599(txq->reg_idx)); txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx), txctrl); break; } } - if (hw->mac.type != ixgbe_mac_82598EB) { - /* disable arbiter before setting MTQC */ - rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - rttdcs |= IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + /* Device configured with multiple TX queues. */ + ixgbe_dev_mq_tx_configure(dev); +} - IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); +/* + * Set up link for 82599 loopback mode Tx->Rx. + */ +static inline void +ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); - /* re-enable arbiter */ - rttdcs &= ~IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) != + IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Could not enable loopback mode"); + /* ignore error */ + return; + } } + + /* Restart link */ + IXGBE_WRITE_REG(hw, + IXGBE_AUTOC, + IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU); + ixgbe_reset_pipeline_82599(hw); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + msec_delay(50); } + /* * Start Transmit and Receive Units. 
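The descriptor-ring programming above splits each 64-bit DMA address across a pair of 32-bit base-address registers (TDBAL/TDBAH here, RDBAL/RDBAH on the receive side). A sketch of the split:

#include <stdint.h>

static void
split_bus_addr(uint64_t bus_addr, uint32_t *bal, uint32_t *bah)
{
	*bal = (uint32_t)(bus_addr & 0x00000000ffffffffULL);	/* low 32 bits */
	*bah = (uint32_t)(bus_addr >> 32);			/* high 32 bits */
}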
*/ @@ -3268,10 +3739,8 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) struct igb_rx_queue *rxq; uint32_t txdctl; uint32_t dmatxctl; - uint32_t rxdctl; uint32_t rxctrl; uint16_t i; - int poll_ms; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3279,11 +3748,11 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; /* Setup Transmit Threshold Registers */ - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); txdctl |= txq->pthresh & 0x7F; txdctl |= ((txq->hthresh & 0x7F) << 8); txdctl |= ((txq->wthresh & 0x7F) << 16); - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); } if (hw->mac.type != ixgbe_mac_82598EB) { @@ -3293,50 +3762,219 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) } for (i = 0; i < dev->data->nb_tx_queues; i++) { - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); - txdctl |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl); - - /* Wait until TX Enable ready */ - if (hw->mac.type == ixgbe_mac_82599EB) { - poll_ms = 10; - do { - rte_delay_ms(1); - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); - } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable " - "Tx Queue %d\n", i); - } + txq = dev->data->tx_queues[i]; + if (!txq->tx_deferred_start) + ixgbe_dev_tx_queue_start(dev, i); } + for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + if (!rxq->rx_deferred_start) + ixgbe_dev_rx_queue_start(dev, i); + } + + /* Enable Receive engine */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); + + /* If loopback mode is enabled for 82599, set up the link accordingly */ + if (hw->mac.type == ixgbe_mac_82599EB && + dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) + ixgbe_setup_loopback_link_82599(hw); + +} + +/* + * Start Receive Units for specified queue. 
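The per-queue start/stop helpers below all wait on an enable bit with the same bounded 1 ms polling loop. A generic sketch of that pattern (reg_read and delay_ms are hypothetical stand-ins):

#include <stdbool.h>
#include <stdint.h>

extern uint32_t reg_read(uint32_t reg);	/* hypothetical accessor */
extern void delay_ms(unsigned int ms);	/* hypothetical delay */

/* Poll until 'bit' reaches the wanted state or poll_ms expires.
 * Returns false on timeout, mirroring the "if (!poll_ms)" checks below. */
static bool
poll_reg_bit(uint32_t reg, uint32_t bit, bool until_set, int poll_ms)
{
	uint32_t val;

	do {
		delay_ms(1);
		val = reg_read(reg);
	} while (--poll_ms && (until_set ? !(val & bit) : (val & bit) != 0));

	return poll_ms != 0;
}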
+ */ +int +ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ixgbe_hw *hw; + struct igb_rx_queue *rxq; + uint32_t rxdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + /* Allocate buffers for descriptor rings */ + if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { + PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", + rx_queue_id); + return -1; + } + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); rxdctl |= IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); /* Wait until RX Enable ready */ - poll_ms = 10; + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; do { rte_delay_ms(1); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable " - "Rx Queue %d\n", i); + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", + rx_queue_id); rte_wmb(); - IXGBE_WRITE_REG(hw, IXGBE_RDT(i), rxq->nb_rx_desc - 1); - } + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + } else + return -1; - /* Enable Receive engine */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - if (hw->mac.type == ixgbe_mac_82598EB) - rxctrl |= IXGBE_RXCTRL_DMBYPS; - rxctrl |= IXGBE_RXCTRL_RXEN; - hw->mac.ops.enable_rx_dma(hw, rxctrl); + return 0; +} + +/* + * Stop Receive Units for specified queue. + */ +int +ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ixgbe_hw *hw; + struct igb_rx_queue *rxq; + uint32_t rxdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + + /* Wait until RX Enable bit clears */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", + rx_queue_id); + + rte_delay_us(RTE_IXGBE_WAIT_100_US); + + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(rxq); + } else + return -1; + + return 0; } /* * Start Transmit Units for specified queue.
+ */ +int +ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ixgbe_hw *hw; + struct igb_tx_queue *txq; + uint32_t txdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (tx_queue_id < dev->data->nb_tx_queues) { + txq = dev->data->tx_queues[tx_queue_id]; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + + /* Wait until TX Enable ready */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable " + "Tx Queue %d", tx_queue_id); + } + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + } else + return -1; + + return 0; +} + +/* + * Stop Transmit Units for specified queue. + */ +int +ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ixgbe_hw *hw; + struct igb_tx_queue *txq; + uint32_t txdctl; + uint32_t txtdh, txtdt; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (tx_queue_id < dev->data->nb_tx_queues) { + txq = dev->data->tx_queues[tx_queue_id]; + + /* Wait until TX queue is empty */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_us(RTE_IXGBE_WAIT_100_US); + txtdh = IXGBE_READ_REG(hw, + IXGBE_TDH(txq->reg_idx)); + txtdt = IXGBE_READ_REG(hw, + IXGBE_TDT(txq->reg_idx)); + } while (--poll_ms && (txtdh != txtdt)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Tx Queue %d is not empty " + "when stopping.", tx_queue_id); + } + + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + + /* Wait until TX Enable bit clears */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable " + "Tx Queue %d", tx_queue_id); + } + + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } + } else + return -1; + + return 0; +} + /* * [VF] Initializes Receive Unit. */ @@ -3355,6 +3993,23 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* + * When the VF driver issues an IXGBE_VF_RESET request, the PF driver + * disables the VF receipt of packets if the PF MTU is > 1500. + * This is done to deal with a limitation of the 82599 that forces + * the PF and all VFs to share the same MTU. + * The PF driver then re-enables VF packet receipt when + * the VF driver issues an IXGBE_VF_SET_LPE request. + * In the meantime, the VF device cannot be used, even if the VF driver + * and the Guest VM network stack are ready to accept packets with a + * size up to the PF MTU. + * As a workaround to this PF behaviour, force the call to + * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way, + * VF packets received can work in all cases.
+ */ + ixgbevf_rlpml_set_vf(hw, + (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); + /* Setup RX queues */ dev->rx_pkt_burst = ixgbe_recv_pkts; for (i = 0; i < dev->data->nb_rx_queues; i++) { @@ -3412,8 +4067,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) * The value is in 1 KB resolution. Valid values can be from * 1 KB to 16 KB. */ - mbp_priv = (struct rte_pktmbuf_pool_private *) - ((char *)rxq->mb_pool + sizeof(struct rte_mempool)); + mbp_priv = rte_mempool_get_priv(rxq->mb_pool); buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM); srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & @@ -3426,12 +4080,32 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << IXGBE_SRRCTL_BSIZEPKT_SHIFT); - if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){ + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->data->scattered_rx = 1; +#ifdef RTE_IXGBE_INC_VECTOR + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; +#else dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; +#endif } } + if (dev->data->dev_conf.rxmode.enable_scatter) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); +#ifdef RTE_IXGBE_INC_VECTOR + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; +#else + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; +#endif + dev->data->scattered_rx = 1; + } + return 0; } @@ -3516,8 +4190,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable " - "Tx Queue %d\n", i); + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); } for (i = 0; i < dev->data->nb_rx_queues; i++) { @@ -3534,8 +4207,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable " - "Rx Queue %d\n", i); + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); rte_wmb(); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
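ixgbe_dev_rx_queue_count(), added earlier in this patch, scans write-back status in strides of four descriptors and wraps at the ring end. A standalone sketch of that scan over a plain status array (DD_BIT stands in for IXGBE_RXDADV_STAT_DD):

#include <stdint.h>

#define SCAN_INTERVAL 4
#define DD_BIT 0x01	/* stands in for IXGBE_RXDADV_STAT_DD */

/* Count completed descriptors from 'tail', probing every fourth entry
 * and wrapping at nb_desc, as the queue-count helper does. */
static uint32_t
count_done(const volatile uint32_t *status, uint16_t nb_desc, uint16_t tail)
{
	uint32_t desc = 0;
	uint16_t idx = tail;

	while (desc < nb_desc && (status[idx] & DD_BIT)) {
		desc += SCAN_INTERVAL;
		idx = (uint16_t)((tail + desc) % nb_desc);
	}
	return desc;
}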