diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index b816fb59f1..9e3bec5d86 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
  *
- *     * Redistributions of source code must retain the above copyright
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
  *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
  */

 #include <sys/queue.h>

@@ -196,6 +195,11 @@ struct igb_tx_queue {
 #define rte_packet_prefetch(p)	do {} while(0)
 #endif

+/*
+ * Number of VMOLR registers (one per VMDq pool) on a 1 GbE NIC.
+ */
+#define E1000_VMOLR_SIZE	(8)
+
 /*********************************************************************
  *
  *  TX function
@@ -1130,16 +1134,16 @@ igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
 static void
 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 {
+	static const union e1000_adv_tx_desc zeroed_desc = { .read = {
+		.buffer_addr = 0}};
 	struct igb_tx_entry *txe = txq->sw_ring;
-	uint32_t size;
 	uint16_t i, prev;
 	struct e1000_hw *hw;

 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
 	/* Zero out HW ring memory */
-	for (i = 0; i < size; i++) {
-		((volatile char *)txq->tx_ring)[i] = 0;
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i] = zeroed_desc;
 	}

 	/* Initialize ring entries */
@@ -1291,13 +1295,13 @@ eth_igb_rx_queue_release(void *rxq)
 static void
 igb_reset_rx_queue(struct igb_rx_queue *rxq)
 {
-	unsigned size;
+	static const union e1000_adv_rx_desc zeroed_desc = { .read = {
+		.pkt_addr = 0}};
 	unsigned i;

 	/* Zero out HW ring memory */
-	size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
-	for (i = 0; i < size; i++) {
-		((volatile char *)rxq->rx_ring)[i] = 0;
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		rxq->rx_ring[i] = zeroed_desc;
 	}

 	rxq->rx_tail = 0;
@@ -1416,6 +1420,23 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return 0;
 }

+int
+eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+	volatile union e1000_adv_rx_desc *rxdp;
+	struct igb_rx_queue *rxq = rx_queue;
+	uint32_t desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return 0;
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	rxdp = &rxq->rx_ring[desc];
+	return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
+}
+
 void
 igb_dev_clear_queues(struct rte_eth_dev *dev)
 {
@@ -1552,6 +1573,118 @@ igb_rss_configure(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }

+/*
+ * Check whether the mac type supports VMDq or not.
+ * Return 1 if it does; otherwise, return 0.
+ */
+static int
+igb_is_vmdq_supported(const struct rte_eth_dev *dev)
+{
+	const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+		return 1;
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82542:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82545:
+	case e1000_82546:
+	case e1000_82547:
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+	case e1000_i210:
+	case e1000_i211:
+	default:
+		PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
+		return 0;
+	}
+}
+
+static int
+igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct e1000_hw *hw;
+	uint32_t mrqc, vt_ctl, vmolr, rctl;
+	int i;
+
+	PMD_INIT_LOG(DEBUG, ">>");
+	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+	/* Check if mac type can support VMDq; a return value of 0 means no support */
+	if (igb_is_vmdq_supported(dev) == 0)
+		return -1;
+
+	igb_rss_disable(dev);
+
+	/* RCTL: enable VLAN filter */
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl |= E1000_RCTL_VFE;
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	/* MRQC: enable VMDq */
+	mrqc = E1000_READ_REG(hw, E1000_MRQC);
+	mrqc |= E1000_MRQC_ENABLE_VMDQ;
+	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+	/* VTCTL: pool selection according to VLAN tag */
+	vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+	if (cfg->enable_default_pool)
+		vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
+	vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
+	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+
+	/*
+	 * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
+	 * Both 82576 and 82580 support it
+	 */
+	if (hw->mac.type != e1000_i350) {
+		for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+			vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+			vmolr |= E1000_VMOLR_STRVLAN;
+			E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+		}
+	}
+
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < IGB_VFTA_SIZE; i++)
+		E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
+
+	/* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
+	if (hw->mac.type != e1000_82580)
+		E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
+
+	/*
+	 * RAH/RAL - allow pools to read specific mac addresses
+	 * In this case, all pools should be able to read from mac addr 0
+	 */
+	E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
+	E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
+
+	/* VLVF: set up filters for vlan tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set vlan id in VF register and set the valid bit */
+		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
+			(cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
+			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) & \
+			E1000_VLVF_POOLSEL_MASK)));
+	}
+
+	E1000_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+
 /*********************************************************************
  *
  *  Enable receive unit.
@@ -1609,7 +1742,20 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 	 * SRIOV inactive scheme
 	 */
 	if (dev->data->nb_rx_queues > 1)
-		igb_rss_configure(dev);
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case ETH_MQ_RX_NONE:
+			/* if mq_mode is not assigned, fall through to RSS mode */
+		case ETH_MQ_RX_RSS:
+			igb_rss_configure(dev);
+			break;
+		case ETH_MQ_RX_VMDQ_ONLY:
+			/* Configure general VMDq-only RX parameters */
+			igb_vmdq_rx_hw_configure(dev);
+			break;
+		default:
+			igb_rss_disable(dev);
+			break;
+		}
 	else
 		igb_rss_disable(dev);
 }
@@ -1768,6 +1914,9 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	 */
 	igb_dev_mq_rx_configure(dev);

+	/* Update rctl, since igb_dev_mq_rx_configure may change its value */
+	rctl |= E1000_READ_REG(hw, E1000_RCTL);
+
 	/*
 	 * Setup the Checksum Register.
 	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
@@ -1786,8 +1935,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

-		/* set STRCRC bit in all queues for Powerville/Springville */
-		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
+		/* set STRCRC bit in all queues */
+		if (hw->mac.type == e1000_i350 ||
+		    hw->mac.type == e1000_i210 ||
+		    hw->mac.type == e1000_i211 ||
+		    hw->mac.type == e1000_i354) {
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
@@ -1799,8 +1951,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	} else {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

-		/* clear STRCRC bit in all queues for Powerville/Springville */
-		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
+		/* clear STRCRC bit in all queues */
+		if (hw->mac.type == e1000_i350 ||
+		    hw->mac.type == e1000_i210 ||
+		    hw->mac.type == e1000_i211 ||
+		    hw->mac.type == e1000_i354) {
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
@@ -1817,7 +1972,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

 	/* Make sure VLAN Filters are off. */
-	rctl &= ~E1000_RCTL_VFE;
+	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+		rctl &= ~E1000_RCTL_VFE;

 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
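
A few notes on the hunks above, with illustrative sketches.

The per-descriptor zeroed_desc assignment in igb_reset_tx_queue() and
igb_reset_rx_queue() is not only clearer than the old byte-wise clear, it
also removes a latent overflow: the TX loop compared a uint16_t counter
against a byte count of sizeof(union e1000_adv_tx_desc) * nb_tx_desc. A
minimal standalone C sketch of the arithmetic (the 4096-entry ring size is
illustrative; 16 bytes is the advanced-descriptor size):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Byte count the old loop had to cover for a 4096-entry ring
	 * of 16-byte advanced descriptors. */
	uint32_t size = 16u * 4096u;	/* 65536 */

	/* Even the largest value a uint16_t counter can hold is still
	 * below "size", so a loop guarded by "i < size" never exits:
	 * the counter wraps from 65535 back to 0 instead. */
	uint16_t i = UINT16_MAX;
	assert(i < size);

	return 0;
}

The RX variant used an unsigned counter and could not wrap; switching both
loops to one struct assignment per descriptor keeps the two reset paths
symmetrical.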
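
The new eth_igb_rx_descriptor_done() is the PMD-level backend for the
generic rte_eth_rx_descriptor_done() wrapper, which lets an application ask
whether the descriptor "offset" slots past the queue tail has its DD (done)
bit set. A usage sketch, assuming a configured and started port; the port
and queue ids and the burst threshold are illustrative, not part of this
patch:

#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32	/* illustrative threshold */

/* Poll queue 0 of port 0, but only call rte_eth_rx_burst() once the
 * descriptor (BURST_SIZE - 1) slots ahead of the PMD's read position
 * reports done, i.e. a full burst has already been written back. */
static uint16_t
rx_full_burst(struct rte_mbuf **pkts)
{
	if (rte_eth_rx_descriptor_done(0, 0, BURST_SIZE - 1) != 1)
		return 0;
	return rte_eth_rx_burst(0, 0, pkts, BURST_SIZE);
}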
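
igb_vmdq_rx_hw_configure() only runs when the application selects
ETH_MQ_RX_VMDQ_ONLY and fills rx_adv_conf.vmdq_rx_conf before calling
rte_eth_dev_configure(). A sketch of such a configuration; the VLAN ids and
pool bitmasks are examples, and nb_queue_pools is assumed to be ETH_8_POOLS
to match the eight VMOLR/VLVF entries programmed above:

#include <rte_ethdev.h>

/* Steer packets to one of eight pools by VLAN tag, via the VLVF
 * filters that igb_vmdq_rx_hw_configure() programs. */
static const struct rte_eth_conf vmdq_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_VMDQ_ONLY,
	},
	.rx_adv_conf = {
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.nb_pool_maps = 2,
			.pool_map = {
				{ .vlan_id = 100, .pools = 1ULL << 0 },
				{ .vlan_id = 101, .pools = 1ULL << 1 },
			},
		},
	},
};

With this rte_eth_conf, the new switch in igb_dev_mq_rx_configure() takes
the ETH_MQ_RX_VMDQ_ONLY branch instead of defaulting to RSS.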