diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 334082d00e..7fe1780a9a 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@
 #include 
 
-#include 
 #include 
 #include 
 #include 
@@ -195,6 +194,11 @@ struct igb_tx_queue {
 #define rte_packet_prefetch(p)	do {} while(0)
 #endif
 
+/*
+ * Macro for the VMDq feature on 1 GbE NICs: the number of VMDq pools,
+ * i.e. the number of per-pool VMOLR registers.
+ */
+#define E1000_VMOLR_SIZE	(8)
+
 /*********************************************************************
  *
  *  TX function
@@ -1081,8 +1085,13 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	if (mz)
 		return mz;
 
+#ifdef RTE_LIBRTE_XEN_DOM0
+	return rte_memzone_reserve_bounded(z_name, ring_size,
+			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
+#else
 	return rte_memzone_reserve_aligned(z_name, ring_size,
 			socket_id, 0, IGB_ALIGN);
+#endif
 }
 
 static void
@@ -1129,16 +1138,16 @@ igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
 static void
 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 {
+	static const union e1000_adv_tx_desc zeroed_desc = { .read = {
+			.buffer_addr = 0}};
 	struct igb_tx_entry *txe = txq->sw_ring;
-	uint32_t size;
 	uint16_t i, prev;
 	struct e1000_hw *hw;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
 	/* Zero out HW ring memory */
-	for (i = 0; i < size; i++) {
-		((volatile char *)txq->tx_ring)[i] = 0;
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i] = zeroed_desc;
 	}
 
 	/* Initialize ring entries */
@@ -1229,15 +1238,20 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->pthresh = tx_conf->tx_thresh.pthresh;
 	txq->hthresh = tx_conf->tx_thresh.hthresh;
 	txq->wthresh = tx_conf->tx_thresh.wthresh;
+	if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
+		txq->wthresh = 1;
 	txq->queue_id = queue_idx;
 	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 
 	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
 	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
-
+#else
+	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
+	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
 	/* Allocate software ring */
 	txq->sw_ring = rte_zmalloc("txq->sw_ring",
 				   sizeof(struct igb_tx_entry) * nb_desc,
@@ -1290,13 +1304,13 @@ eth_igb_rx_queue_release(void *rxq)
 static void
 igb_reset_rx_queue(struct igb_rx_queue *rxq)
 {
-	unsigned size;
+	static const union e1000_adv_rx_desc zeroed_desc = { .read = {
+			.pkt_addr = 0}};
 	unsigned i;
 
 	/* Zero out HW ring memory */
-	size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
-	for (i = 0; i < size; i++) {
-		((volatile char *)rxq->rx_ring)[i] = 0;
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		rxq->rx_ring[i] = zeroed_desc;
 	}
 
 	rxq->rx_tail = 0;
@@ -1345,6 +1359,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
 	rxq->hthresh = rx_conf->rx_thresh.hthresh;
 	rxq->wthresh = rx_conf->rx_thresh.wthresh;
+	if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
+		rxq->wthresh = 1;
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
@@ -1367,7 +1383,11 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
 	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
 	rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
 
 	/* Allocate software ring. */
@@ -1568,6 +1588,118 @@ igb_rss_configure(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
 }
 
+/*
+ * Check whether the MAC type supports VMDq.
+ * Return 1 if VMDq is supported; otherwise return 0.
+ */
+static int
+igb_is_vmdq_supported(const struct rte_eth_dev *dev)
+{
+	const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+		return 1;
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82542:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82545:
+	case e1000_82546:
+	case e1000_82547:
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+	case e1000_i210:
+	case e1000_i211:
+	default:
+		PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
+		return 0;
+	}
+}
+
+static int
+igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct e1000_hw *hw;
+	uint32_t mrqc, vt_ctl, vmolr, rctl;
+	int i;
+
+	PMD_INIT_LOG(DEBUG, ">>");
+	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+	/* Check if the MAC type supports VMDq; a return value of 0 means it does not */
+	if (igb_is_vmdq_supported(dev) == 0)
+		return -1;
+
+	igb_rss_disable(dev);
+
+	/* RCTL: enable VLAN filter */
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl |= E1000_RCTL_VFE;
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	/* MRQC: enable VMDq */
+	mrqc = E1000_READ_REG(hw, E1000_MRQC);
+	mrqc |= E1000_MRQC_ENABLE_VMDQ;
+	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+	/* VTCTL: pool selection according to VLAN tag */
+	vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+	if (cfg->enable_default_pool)
+		vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
+	vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
+	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+
+	/*
+	 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1;
+	 * both 82576 and 82580 support it
+	 */
+	if (hw->mac.type != e1000_i350) {
+		for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+			vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+			vmolr |= E1000_VMOLR_STRVLAN;
+			E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+		}
+	}
+
+	/* VFTA - enable all VLAN filters */
+	for (i = 0; i < IGB_VFTA_SIZE; i++)
+		E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
+
+	/* VFRE: enable the 8 pools for RX; both 82576 and i350 support it */
+	if (hw->mac.type != e1000_82580)
+		E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
+
+	/*
+	 * RAH/RAL - allow pools to read specific MAC addresses.
+	 * In this case, all pools should be able to read from MAC address 0.
+	 */
+	E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
+	E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
+
+	/* VLVF: set up filters for VLAN tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set the VLAN ID in the VF register and set the valid bit */
+		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
+			(cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) |
+			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
+			E1000_VLVF_POOLSEL_MASK)));
+	}
+
+	E1000_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+
 /*********************************************************************
  *
  *  Enable receive unit.
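The ETH_MQ_RX_VMDQ_ONLY path programmed by igb_vmdq_rx_hw_configure() above is selected purely by the application's port configuration. A minimal application-side sketch (not part of the patch) of how a DPDK program of this era could request VMDq-only RX; the function name configure_vmdq_only() and the VLAN/pool values are illustrative assumptions:

#include <string.h>
#include <rte_ethdev.h>

/*
 * Illustrative sketch (not part of this patch): request VMDq-only RX on a
 * port so that igb_vmdq_rx_hw_configure() is invoked during RX init.
 * The VLAN IDs and pool indices are arbitrary example values.
 */
static int
configure_vmdq_only(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_vmdq_rx_conf *cfg;

	memset(&port_conf, 0, sizeof(port_conf));
	port_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;

	cfg = &port_conf.rx_adv_conf.vmdq_rx_conf;
	cfg->nb_queue_pools = ETH_8_POOLS;	/* 1 GbE VMDq: 8 pools */
	cfg->enable_default_pool = 0;		/* no default pool for unmatched VLANs */
	cfg->nb_pool_maps = 2;
	cfg->pool_map[0].vlan_id = 100;		/* VLAN 100 -> pool 0 */
	cfg->pool_map[0].pools = 1 << 0;
	cfg->pool_map[1].vlan_id = 200;		/* VLAN 200 -> pool 1 */
	cfg->pool_map[1].pools = 1 << 1;

	/* The PMD programs RCTL/MRQC/VT_CTL etc. from this during RX init. */
	return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, &port_conf);
}

With enable_default_pool left at 0, frames whose VLAN tag matches no pool_map entry are not steered anywhere: igb_vmdq_rx_hw_configure() only programs the VT_CTL default pool when enable_default_pool is set.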
@@ -1624,10 +1756,20 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		/*
 		 * SRIOV inactive scheme
 		 */
-		if (dev->data->nb_rx_queues > 1)
-			igb_rss_configure(dev);
-		else
-			igb_rss_disable(dev);
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case ETH_MQ_RX_RSS:
+			igb_rss_configure(dev);
+			break;
+		case ETH_MQ_RX_VMDQ_ONLY:
+			/* Configure general VMDq-only RX parameters */
+			igb_vmdq_rx_hw_configure(dev);
+			break;
+		case ETH_MQ_RX_NONE:
+			/* If mq_mode is none, disable RSS */
+		default:
+			igb_rss_disable(dev);
+			break;
+		}
 	}
 
 	return 0;
@@ -1708,8 +1850,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure RX buffer size.
 		 */
-		mbp_priv = (struct rte_pktmbuf_pool_private *)
-			((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
 		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
 				       RTE_PKTMBUF_HEADROOM);
 		if (buf_size >= 1024) {
@@ -1784,6 +1925,9 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	 */
 	igb_dev_mq_rx_configure(dev);
 
+	/* Update RCTL, since igb_dev_mq_rx_configure() may have changed its value */
+	rctl |= E1000_READ_REG(hw, E1000_RCTL);
+
 	/*
 	 * Setup the Checksum Register.
 	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
@@ -1802,8 +1946,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
-		/* set STRCRC bit in all queues for Powerville/Springville */
-		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
+		/* set STRCRC bit in all queues */
+		if (hw->mac.type == e1000_i350 ||
+				hw->mac.type == e1000_i210 ||
+				hw->mac.type == e1000_i211 ||
+				hw->mac.type == e1000_i354) {
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
@@ -1815,8 +1962,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	} else {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
-		/* clear STRCRC bit in all queues for Powerville/Springville */
-		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
+		/* clear STRCRC bit in all queues */
+		if (hw->mac.type == e1000_i350 ||
+				hw->mac.type == e1000_i210 ||
+				hw->mac.type == e1000_i211 ||
+				hw->mac.type == e1000_i354) {
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
@@ -1833,7 +1983,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 			(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
 	/* Make sure VLAN Filters are off. */
-	rctl &= ~E1000_RCTL_VFE;
+	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+		rctl &= ~E1000_RCTL_VFE;
 	/* Don't store bad packets. */
 	rctl &= ~E1000_RCTL_SBP;
@@ -1926,6 +2077,11 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	/* Set up the MTU */
+	e1000_rlpml_set_vf(hw,
+		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
+		VLAN_TAG_SIZE));
+
 	/* Configure and enable each RX queue. */
 	rctl_bsize = 0;
 	dev->rx_pkt_burst = eth_igb_recv_pkts;
@@ -1953,8 +2109,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure RX buffer size.
		 */
-		mbp_priv = (struct rte_pktmbuf_pool_private *)
-			((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
 		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
 				       RTE_PKTMBUF_HEADROOM);
 		if (buf_size >= 1024) {
@@ -1999,7 +2154,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		rxdctl &= 0xFFF00000;
 		rxdctl |= (rxq->pthresh & 0x1F);
 		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
-		if (hw->mac.type == e1000_82576) {
+		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround for the 82576 VF erratum:
			 * force WTHRESH to 1
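Two of the hunks above replace open-coded pointer arithmetic with rte_mempool_get_priv(). A minimal sketch of the equivalent RX buffer size computation, assuming a pool created with rte_pktmbuf_pool_init() so that the mempool private area holds a struct rte_pktmbuf_pool_private; the helper name rx_buf_size() is hypothetical:

#include <rte_mbuf.h>
#include <rte_mempool.h>

/*
 * Hypothetical helper mirroring the computation above: the usable RX
 * buffer size is the mbuf data room minus the reserved headroom.
 * rte_mempool_get_priv() returns the pool's private area, replacing the
 * old "(char *)pool + sizeof(struct rte_mempool)" offset arithmetic.
 */
static uint16_t
rx_buf_size(struct rte_mempool *mb_pool)
{
	struct rte_pktmbuf_pool_private *mbp_priv =
		rte_mempool_get_priv(mb_pool);

	return (uint16_t)(mbp_priv->mbuf_data_room_size -
			RTE_PKTMBUF_HEADROOM);
}

Using the accessor keeps the driver correct even if the mempool header layout changes, which is why the patch drops the sizeof(struct rte_mempool) arithmetic in both the PF and VF RX init paths.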