diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 128ed0b3a0..25ff5f68fa 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1452,27 +1452,28 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 uint64_t
 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
 {
-	uint64_t rx_offload_capa;
+	uint64_t tx_offload_capa;
 
 	RTE_SET_USED(dev);
-	rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
 			  DEV_TX_OFFLOAD_UDP_CKSUM   |
 			  DEV_TX_OFFLOAD_TCP_CKSUM   |
 			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
-			  DEV_TX_OFFLOAD_TCP_TSO;
+			  DEV_TX_OFFLOAD_TCP_TSO     |
+			  DEV_TX_OFFLOAD_MULTI_SEGS;
 
-	return rx_offload_capa;
+	return tx_offload_capa;
 }
 
 uint64_t
 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
 {
-	uint64_t rx_queue_offload_capa;
+	uint64_t tx_queue_offload_capa;
 
-	rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+	tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
 
-	return rx_queue_offload_capa;
+	return tx_queue_offload_capa;
 }
 
 int
@@ -1637,7 +1638,7 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
 			  DEV_RX_OFFLOAD_UDP_CKSUM   |
 			  DEV_RX_OFFLOAD_TCP_CKSUM   |
 			  DEV_RX_OFFLOAD_JUMBO_FRAME |
-			  DEV_RX_OFFLOAD_CRC_STRIP   |
+			  DEV_RX_OFFLOAD_KEEP_CRC    |
 			  DEV_RX_OFFLOAD_SCATTER;
 
 	return rx_offload_capa;
@@ -1719,8 +1720,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		rxq->crc_len = ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
 
 	/*
 	 * Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2370,8 +2373,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
+		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+			rxq->crc_len = ETHER_CRC_LEN;
+		else
+			rxq->crc_len = 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2500,10 +2505,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
-		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
-		/* set STRCRC bit in all queues */
+		/* clear STRCRC bit in all queues */
 		if (hw->mac.type == e1000_i350 ||
 		    hw->mac.type == e1000_i210 ||
 		    hw->mac.type == e1000_i211 ||
@@ -2512,14 +2517,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
 					E1000_DVMOLR(rxq->reg_idx));
-				dvmolr |= E1000_DVMOLR_STRCRC;
+				dvmolr &= ~E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
 			}
 		}
 	} else {
-		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
-		/* clear STRCRC bit in all queues */
+		/* set STRCRC bit in all queues */
 		if (hw->mac.type == e1000_i350 ||
 		    hw->mac.type == e1000_i210 ||
 		    hw->mac.type == e1000_i211 ||
@@ -2528,7 +2533,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
 					E1000_DVMOLR(rxq->reg_idx));
-				dvmolr &= ~E1000_DVMOLR_STRCRC;
+				dvmolr |= E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
 			}
 		}
@@ -2846,11 +2851,17 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 }
 
 int
-igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+igb_rss_conf_init(struct rte_eth_dev *dev,
+		  struct igb_rte_flow_rss_conf *out,
 		  const struct rte_flow_action_rss *in)
 {
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	if (in->key_len > RTE_DIM(out->key) ||
-	    in->queue_num > RTE_DIM(out->queue))
+	    ((hw->mac.type == e1000_82576) &&
+	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+	    ((hw->mac.type != e1000_82576) &&
+	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
 		return -EINVAL;
 	out->conf = (struct rte_flow_action_rss){
 		.func = in->func,
@@ -2939,7 +2950,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 	rss_conf.rss_key = rss_intel_key; /* Default hash key */
 	igb_hw_rss_hash_set(hw, &rss_conf);
 
-	if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
+	if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))
 		return -EINVAL;
 
 	return 0;
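
For context, the user-visible effect of the Rx hunks above is that igb now advertises
DEV_RX_OFFLOAD_KEEP_CRC instead of DEV_RX_OFFLOAD_CRC_STRIP: CRC stripping becomes the
default behaviour, and an application must opt in to keeping the CRC. The sketch below
shows how a caller might drive the new flag on this DPDK snapshot; it is illustrative
only. The helper name port_init_keep_crc, the single-queue setup, and the 512-descriptor
ring are assumptions, not part of this patch.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper: configure one port/queue with KEEP_CRC when the
 * PMD advertises it. Error handling is trimmed for brevity. */
static int
port_init_keep_crc(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* This DPDK snapshot still carries the legacy rxmode bit-fields;
	 * tell ethdev to honour the offloads field instead. */
	port_conf.rxmode.ignore_offload_bitfield = 1;

	/* After this patch, igb reports DEV_RX_OFFLOAD_KEEP_CRC in
	 * rx_offload_capa; request it only when it is present. */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* With KEEP_CRC set, eth_igb_rx_queue_setup() above stores
	 * ETHER_CRC_LEN in rxq->crc_len, so the PMD can account for the
	 * trailing 4-byte CRC that the NIC no longer strips. */
	return rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
}

Note that the capability check matters: a PMD that does not advertise
DEV_RX_OFFLOAD_KEEP_CRC will reject the flag at configure time, which is exactly the
negotiation the new offloads API is designed to make explicit.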