diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index e0318171f0..ca6fb693da 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -415,7 +415,6 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
 		tx_offload_mask.l2_len |= ~0;
 		tx_offload_mask.l3_len |= ~0;
-		tx_offload_mask.l4_len |= ~0;
 		break;
 	case PKT_TX_SCTP_CKSUM:
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
@@ -573,7 +572,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct ixgbe_tx_entry *sw_ring;
 	struct ixgbe_tx_entry *txe, *txn;
 	volatile union ixgbe_adv_tx_desc *txr;
-	volatile union ixgbe_adv_tx_desc *txd;
+	volatile union ixgbe_adv_tx_desc *txd, *txp;
 	struct rte_mbuf *tx_pkt;
 	struct rte_mbuf *m_seg;
 	uint64_t buf_dma_addr;
@@ -596,6 +595,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	txr     = txq->tx_ring;
 	tx_id   = txq->tx_tail;
 	txe = &sw_ring[tx_id];
+	txp = NULL;
 
 	/* Determine if the descriptor ring needs to be cleaned. */
 	if (txq->nb_tx_free < txq->tx_free_thresh)
@@ -639,6 +639,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 */
 		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
 
+		if (txp != NULL &&
+				nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+			/* set RS on the previous packet in the burst */
+			txp->read.cmd_type_len |=
+				rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
 		/*
 		 * The number of descriptors that must be allocated for a
 		 * packet is the number of segments of that packet, plus 1
@@ -841,10 +847,18 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 			/* Update txq RS bit counters */
 			txq->nb_tx_used = 0;
-		}
+			txp = NULL;
+		} else
+			txp = txd;
+
 		txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
 	}
+
 end_of_tx:
+	/* set RS on last packet in the burst */
+	if (txp != NULL)
+		txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
 	rte_wmb();
 
 	/*
@@ -1820,25 +1834,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  *
  **********************************************************************/
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
 /*
  * Create memzone for HW rings. malloc can't be used as the physical address is
  * needed. If the memzone is already created, then this function returns a ptr
@@ -2007,9 +2002,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
-	    (nb_desc > IXGBE_MAX_RING_DESC) ||
-	    (nb_desc < IXGBE_MIN_RING_DESC)) {
+	if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+			(nb_desc > IXGBE_MAX_RING_DESC) ||
+			(nb_desc < IXGBE_MIN_RING_DESC)) {
 		return -EINVAL;
 	}
 
@@ -2039,9 +2034,16 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
 	if (tx_rs_thresh >= (nb_desc - 2)) {
 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
-			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
-			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-			     (int)dev->data->port_id, (int)queue_idx);
+			"of TX descriptors minus 2. (tx_rs_thresh=%u "
+			"port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			(int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+	if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
+			"(tx_rs_thresh=%u port=%d queue=%d)",
+			DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
+			(int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_free_thresh >= (nb_desc - 3)) {
@@ -2374,9 +2376,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
-	    (nb_desc > IXGBE_MAX_RING_DESC) ||
-	    (nb_desc < IXGBE_MIN_RING_DESC)) {
+	if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+			(nb_desc > IXGBE_MAX_RING_DESC) ||
+			(nb_desc < IXGBE_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
 
@@ -2647,11 +2649,13 @@ ixgbe_rss_disable(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw;
 	uint32_t mrqc;
+	uint32_t mrqc_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	mrqc &= ~IXGBE_MRQC_RSSEN;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
 
 static void
@@ -2662,6 +2666,11 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	uint32_t rss_key;
 	uint64_t rss_hf;
 	uint16_t i;
+	uint32_t mrqc_reg;
+	uint32_t rssrk_reg;
+
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
 
 	hash_key = rss_conf->rss_key;
 	if (hash_key != NULL) {
@@ -2671,7 +2680,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 			rss_key |= hash_key[(i * 4) + 1] << 8;
 			rss_key |= hash_key[(i * 4) + 2] << 16;
 			rss_key |= hash_key[(i * 4) + 3] << 24;
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+			IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
 		}
 	}
 
@@ -2696,7 +2705,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
 
 int
@@ -2706,9 +2715,17 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	struct ixgbe_hw *hw;
 	uint32_t mrqc;
 	uint64_t rss_hf;
+	uint32_t mrqc_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_rss_update_sp(hw->mac.type)) {
+		PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+			"NIC.");
+		return -ENOTSUP;
+	}
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+
 	/*
 	 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
 	 * "RSS enabling cannot be done dynamically while it must be
@@ -2719,7 +2736,7 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	 * disabled at initialization time.
 	 */
 	rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
 		if (rss_hf != 0) /* Enable RSS */
 			return -(EINVAL);
@@ -2742,13 +2759,17 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint32_t rss_key;
 	uint64_t rss_hf;
 	uint16_t i;
+	uint32_t mrqc_reg;
+	uint32_t rssrk_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
 	hash_key = rss_conf->rss_key;
 	if (hash_key != NULL) {
 		/* Return RSS hash key */
 		for (i = 0; i < 10; i++) {
-			rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+			rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
 			hash_key[(i * 4)] = rss_key & 0x000000FF;
 			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
 			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
@@ -2757,7 +2778,7 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 
 	/* Get RSS functions configured in MRQC register */
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
 		rss_conf->rss_hf = 0;
 		return 0;
@@ -2793,22 +2814,28 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
 	uint32_t reta;
 	uint16_t i;
 	uint16_t j;
+	uint16_t sp_reta_size;
+	uint32_t reta_reg;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
 	/*
 	 * Fill in redirection table
 	 * The byte-swap is needed because NIC registers are in
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < 128; i++, j++) {
+	for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
 		if (j == dev->data->nb_rx_queues)
 			j = 0;
 		reta = (reta << 8) | j;
 		if ((i & 3) == 3)
-			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+			IXGBE_WRITE_REG(hw, reta_reg,
 					rte_bswap32(reta));
 	}
 
@@ -2903,7 +2930,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
 		 */
-		queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+		queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
 
 	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
 
@@ -3038,7 +3065,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_rx_conf->dcb_queue[i];
+		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3066,7 +3093,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_tx_conf->dcb_queue[i];
+		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3088,7 +3115,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = rx_conf->dcb_queue[i];
+		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3109,7 +3136,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = tx_conf->dcb_queue[i];
+		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3144,9 +3171,13 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
 			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
 				IXGBE_MRQC_VMDQRT4TCEN;
 		else {
+			/* no matter the mode is DCB or DCB_RSS, just
+			 * set the MRQE to RSSXTCEN. RSS is controlled
+			 * by RSS_FIELD
+			 */
 			IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
 			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-				IXGBE_MRQC_RT4TCEN;
+				IXGBE_MRQC_RTRSS4TCEN;
 		}
 	}
 	if (dcb_config->num_tcs.pg_tcs == 8) {
@@ -3156,7 +3187,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
 		else {
 			IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
 			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-				IXGBE_MRQC_RT8TCEN;
+				IXGBE_MRQC_RTRSS8TCEN;
 		}
 	}
 
@@ -3261,16 +3292,17 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		 *get dcb and VT rx configuration parameters
 		 *from rte_eth_conf
 		 */
-		ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+		ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
 		/*Configure general VMDQ and DCB RX parameters*/
 		ixgbe_vmdq_dcb_configure(dev);
 	}
 	break;
 	case ETH_MQ_RX_DCB:
+	case ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
-		ixgbe_dcb_rx_config(dev,dcb_config);
+		ixgbe_dcb_rx_config(dev, dcb_config);
 		/*Configure general DCB RX parameters*/
 		ixgbe_dcb_rx_hw_config(hw, dcb_config);
 		break;
@@ -3292,7 +3324,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/*get DCB TX configuration parameters from rte_eth_conf*/
-		ixgbe_dcb_tx_config(dev,dcb_config);
+		ixgbe_dcb_tx_config(dev, dcb_config);
 		/*Configure general DCB TX parameters*/
 		ixgbe_dcb_tx_hw_config(hw, dcb_config);
 		break;
@@ -3433,14 +3465,15 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 
 	/* check support mq_mode for DCB */
 	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
 		return;
 
 	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
-	ixgbe_dcb_hw_configure(dev,dcb_cfg);
+	ixgbe_dcb_hw_configure(dev, dcb_cfg);
 
 	return;
 }
@@ -3682,21 +3715,25 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-			case ETH_MQ_RX_RSS:
-				ixgbe_rss_configure(dev);
-				break;
+		case ETH_MQ_RX_RSS:
+		case ETH_MQ_RX_DCB_RSS:
+		case ETH_MQ_RX_VMDQ_RSS:
+			ixgbe_rss_configure(dev);
+			break;
 
-			case ETH_MQ_RX_VMDQ_DCB:
-				ixgbe_vmdq_dcb_configure(dev);
-				break;
+		case ETH_MQ_RX_VMDQ_DCB:
+			ixgbe_vmdq_dcb_configure(dev);
+			break;
 
-			case ETH_MQ_RX_VMDQ_ONLY:
-				ixgbe_vmdq_rx_hw_configure(dev);
-				break;
+		case ETH_MQ_RX_VMDQ_ONLY:
+			ixgbe_vmdq_rx_hw_configure(dev);
+			break;
 
-			case ETH_MQ_RX_NONE:
-				/* if mq_mode is none, disable rss mode.*/
-			default: ixgbe_rss_disable(dev);
+		case ETH_MQ_RX_NONE:
+		default:
+			/* if mq_mode is none, disable rss mode.*/
+			ixgbe_rss_disable(dev);
+			break;
 		}
 	} else {
 		/*
@@ -3940,7 +3977,9 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		 */
 	} else if (adapter->rx_vec_allowed) {
 		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
-				   "burst size no less than 32.");
+				   "burst size no less than %d (port=%d).",
+			     RTE_IXGBE_DESCS_PER_LOOP,
+			     dev->data->port_id);
 
 		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
 	} else if (adapter->rx_bulk_alloc_allowed) {
@@ -4496,6 +4535,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		rte_wmb();
 		IXGBE_WRITE_REG(hw,
 				IXGBE_RDH(rxq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	} else
 		return -1;
 
@@ -4539,6 +4579,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		ixgbe_rx_queue_release_mbufs(rxq);
 		ixgbe_reset_rx_queue(adapter, rxq);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	} else
 		return -1;
 
@@ -4581,6 +4622,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	} else
 		return -1;
 
@@ -4641,12 +4683,50 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			txq->ops->release_mbufs(txq);
 			txq->ops->reset(txq);
 		}
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	} else
 		return -1;
 
 	return 0;
 }
 
+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct ixgbe_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct ixgbe_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
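---
Notes and usage sketches for the hunks above.

The ixgbe_xmit_pkts() changes batch the RS (Report Status) bit instead of requesting a descriptor write-back for every packet: txp remembers the last descriptor of the previous packet, RS is set there only when the next packet would push the descriptor count past tx_rs_thresh, and the end_of_tx path guarantees the final packet of a burst still reports completion so its descriptors can be reclaimed. Below is a minimal stand-alone sketch of that batching idea, slightly simplified from the driver; struct desc, CMD_RS and fill_pkt() are hypothetical stand-ins, not the driver's own types.

/* Sketch of deferred RS-bit batching; hypothetical types throughout. */
#include <stdint.h>

#define CMD_RS	(1u << 27)	/* stand-in for IXGBE_TXD_CMD_RS */

struct desc {
	uint32_t cmd;
};

/* Consume nb_segs descriptors for one packet; return the last one. */
static struct desc *
fill_pkt(struct desc *ring, uint16_t *tail, uint16_t size, uint16_t nb_segs)
{
	struct desc *last = NULL;
	uint16_t s;

	for (s = 0; s < nb_segs; s++) {
		last = &ring[*tail];
		last->cmd = 0;			/* payload fields elided */
		*tail = (uint16_t)((*tail + 1) % size);
	}
	return last;
}

static void
xmit_burst(struct desc *ring, uint16_t size, uint16_t *tail,
	   const uint16_t *segs, uint16_t nb_pkts, uint16_t rs_thresh)
{
	struct desc *txp = NULL;	/* last desc of previous packet */
	uint16_t nb_used = 0;		/* descriptors since last RS */
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * If this packet would cross the threshold, request the
		 * write-back on the previous packet first, bounding the
		 * distance between descriptors that carry RS.
		 */
		if (txp != NULL && nb_used + segs[i] >= rs_thresh) {
			txp->cmd |= CMD_RS;
			nb_used = 0;
		}

		struct desc *last = fill_pkt(ring, tail, size, segs[i]);

		nb_used = (uint16_t)(nb_used + segs[i]);
		if (nb_used >= rs_thresh) {
			last->cmd |= CMD_RS;	/* threshold hit here */
			nb_used = 0;
			txp = NULL;
		} else {
			txp = last;	/* candidate for a deferred RS */
		}
	}

	/* Mirror of the end_of_tx hunk: the tail of the burst must
	 * always report completion. */
	if (txp != NULL)
		txp->cmd |= CMD_RS;
}

Fewer RS bits means fewer completion write-backs over the bus; the new upper-bound check against DEFAULT_TX_RS_THRESH in ixgbe_dev_tx_queue_setup() appears to exist because the hardware limits how far apart RS-carrying descriptors may be.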
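Several RSS hunks stop hard-coding IXGBE_MRQC, IXGBE_RSSRK(0) and IXGBE_RETA(i >> 2) and instead resolve the register through ixgbe_mrqc_reg_get(), ixgbe_rssrk_reg_get() and ixgbe_reta_reg_get(), keyed on hw->mac.type; ixgbe_reta_size_get() likewise replaces the hard-coded 128-entry table size. That lets MAC generations with a relocated RSS register map or a larger redirection table share one code path. A sketch of the selection pattern follows; the enum and offsets are placeholders, not the PMD's real values, which live in the ixgbe headers.

/* Sketch of MAC-type keyed register selection; placeholder values. */
#include <stdint.h>

enum mac_type { MAC_82599, MAC_X540, MAC_X550 };

#define MRQC_LEGACY	0x05818u	/* example offset only */
#define MRQC_RELOCATED	0x08100u	/* example offset only */

static uint32_t
mrqc_reg_get(enum mac_type mac)
{
	switch (mac) {
	case MAC_X550:
		return MRQC_RELOCATED;	/* newer MAC, new location */
	default:
		return MRQC_LEGACY;	/* fixed legacy location */
	}
}

Callers read and write through the returned offset exactly as the hunks above do, so the RSS logic itself stays MAC-agnostic. The new ixgbe_rss_update_sp() gate in ixgbe_dev_rss_hash_update() follows the same pattern: a capability keyed on mac.type, returning -ENOTSUP for MACs that cannot reprogram the hash configuration at runtime.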
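The redirection-table loop in ixgbe_rss_configure() packs four one-byte queue indices into each 32-bit RETA register and flushes on every fourth entry; rte_bswap32() then accounts for the register's little-endian byte order. A self-contained sketch of just the packing arithmetic, with example sizes and printf standing in for the register write:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint16_t reta_size = 128;		/* example table size */
	const uint16_t nb_rx_queues = 4;	/* example queue count */
	uint32_t reta = 0;
	uint16_t i, j;

	for (i = 0, j = 0; i < reta_size; i++, j++) {
		if (j == nb_rx_queues)	/* spread queues round-robin */
			j = 0;
		reta = (reta << 8) | j;	/* entry enters the low byte */
		if ((i & 3) == 3)	/* four entries collected */
			printf("RETA[%u] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}

With four queues every word prints as 0x00010203; the driver's rte_bswap32() would store it as 0x03020100, so the first entry of each group of four lands in byte 0 of the register.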
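The MRQC hunks switch the non-VT DCB modes from IXGBE_MRQC_RT[48]TCEN to IXGBE_MRQC_RTRSS[48]TCEN, so RSS can hash within each traffic class while the RSS_FIELD bits still decide whether hashing actually happens, and ETH_MQ_RX_DCB_RSS is now accepted by both the DCB and the RSS configuration paths. A sketch of the application-side configuration that would exercise this path (DPDK 2.x-era ethdev API, error handling trimmed; note that ixgbe_configure_dcb() above also requires nb_rx_queues == ETH_DCB_NUM_QUEUES):

#include <rte_ethdev.h>

static int
configure_dcb_rss(uint8_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_DCB_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,	/* default key */
				.rss_hf = ETH_RSS_IP,	/* sets RSS_FIELD */
			},
			.dcb_rx_conf = {
				.nb_tcs = ETH_4_TCS,
				/* all eight user priorities on TC 0 */
				.dcb_tc = { 0, 0, 0, 0, 0, 0, 0, 0 },
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

The .dcb_tc field name matches the dcb_queue-to-dcb_tc rename visible in the hunks above, which reflects what the array really indexes: a traffic class per user priority, not a queue.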
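Finally, the queue start/stop hunks record RTE_ETH_QUEUE_STATE_STARTED/STOPPED in dev->data so the ethdev layer can report and police per-queue state, and the new ixgbe_rxq_info_get()/ixgbe_txq_info_get() callbacks expose each queue's live configuration. Applications reach them through the generic ethdev calls; a small usage sketch (same-era API, where port_id is still a uint8_t):

#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_rxq(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	/* Filled in by the ixgbe_rxq_info_get() callback added above. */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	printf("port %u rxq %u: nb_desc=%u scattered_rx=%u\n",
	       port_id, queue_id, qinfo.nb_desc, qinfo.scattered_rx);
	printf("  rx_free_thresh=%u drop_en=%u deferred_start=%u\n",
	       qinfo.conf.rx_free_thresh, qinfo.conf.rx_drop_en,
	       qinfo.conf.rx_deferred_start);
}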