X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx.c;h=c9be9fddde4f9a9813d36449ff25371a59be4a84;hb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;hp=52640553d60d186fdc888b99269211b3a3802a07;hpb=2d5f6953d56d13a6cea916d5d8384bfc07041991;p=dpdk.git

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 52640553d6..c9be9fddde 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -47,23 +47,23 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 				   dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
 	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+		if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
 		    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
 				    "be larger than %u and smaller than %u,"
 				    "as jumbo frame is enabled",
-				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN,
 				    (uint32_t)ICE_FRAME_SIZE_MAX);
 			return -EINVAL;
 		}
 	} else {
-		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
-		    rxq->max_pkt_len > ETHER_MAX_LEN) {
+		if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+		    rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
 				    "larger than %u and smaller than %u, "
 				    "as jumbo frame is disabled",
-				    (uint32_t)ETHER_MIN_LEN,
-				    (uint32_t)ETHER_MAX_LEN);
+				    (uint32_t)RTE_ETHER_MIN_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN);
 			return -EINVAL;
 		}
 	}
@@ -111,7 +111,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 			      RTE_PKTMBUF_HEADROOM);
 
 	/* Check if scattered RX needs to be used. */
-	if ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
+	if (rxq->max_pkt_len > buf_size)
 		dev->data->scattered_rx = 1;
 
 	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
@@ -460,8 +460,9 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	/* Init the Tx tail register*/
 	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
 
-	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
-			      sizeof(txq_elem), NULL);
+	/* Fix me, we assume TC always 0 here */
+	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
+			      &txq_elem, sizeof(txq_elem), NULL);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to add lan txq");
 		return -EIO;
@@ -540,9 +541,12 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
 	struct ice_tx_queue *txq;
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_vsi *vsi = pf->main_vsi;
 	enum ice_status status;
 	uint16_t q_ids[1];
 	uint32_t q_teids[1];
+	uint16_t q_handle = tx_queue_id;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
@@ -560,8 +564,9 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	q_ids[0] = txq->reg_idx;
 	q_teids[0] = txq->q_teid;
 
-	status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
-				 ICE_NO_RESET, 0, NULL);
+	/* Fix me, we assume TC always 0 here */
+	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
+				 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
 	if (status != ICE_SUCCESS) {
 		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
 		return -EINVAL;
@@ -624,7 +629,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
-		rxq->crc_len = ETHER_CRC_LEN;
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
 
@@ -759,17 +764,32 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	 *  - tx_rs_thresh must be a divisor of the ring size.
 	 *  - tx_free_thresh must be greater than 0.
 	 *  - tx_free_thresh must be less than the size of the ring minus 3.
+	 *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
 	 *
 	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
 	 * race condition, hence the maximum threshold constraints. When set
 	 * to zero use default values.
 	 */
-	tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
-				  tx_conf->tx_rs_thresh :
-				  ICE_DEFAULT_TX_RSBIT_THRESH);
 	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
 				    tx_conf->tx_free_thresh :
 				    ICE_DEFAULT_TX_FREE_THRESH);
+	/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
+	tx_rs_thresh =
+		(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
+		nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
+	if (tx_conf->tx_rs_thresh)
+		tx_rs_thresh = tx_conf->tx_rs_thresh;
+	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+			     "exceed nb_desc. (tx_rs_thresh=%u "
+			     "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)nb_desc,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -EINVAL;
+	}
 	if (tx_rs_thresh >= (nb_desc - 2)) {
 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
 			     "number of TX descriptors minus 2. "
@@ -880,6 +900,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = TRUE;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
+	ice_set_tx_function_flag(dev, txq);
 
 	return 0;
 }
@@ -1406,17 +1427,17 @@ ice_recv_scattered_pkts(void *rx_queue,
 		 */
 		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt_len -= ETHER_CRC_LEN;
-			if (rx_packet_len <= ETHER_CRC_LEN) {
+			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
 				first_seg->nb_segs--;
 				last_seg->data_len =
 					(uint16_t)(last_seg->data_len -
-					(ETHER_CRC_LEN - rx_packet_len));
+					(RTE_ETHER_CRC_LEN - rx_packet_len));
 				last_seg->next = NULL;
 			} else
 				rxm->data_len = (uint16_t)(rx_packet_len -
-							   ETHER_CRC_LEN);
+							   RTE_ETHER_CRC_LEN);
 		}
 
 		first_seg->port = rxq->port_id;
@@ -2300,6 +2321,27 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 	}
 }
 
+void __attribute__((cold))
+ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
+{
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Use a simple Tx queue if possible (only fast free is allowed) */
+	ad->tx_simple_allowed =
+		(txq->offloads ==
+		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
+
+	if (ad->tx_simple_allowed)
+		PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+			     txq->queue_id);
+	else
+		PMD_INIT_LOG(DEBUG,
+			     "Simple Tx can NOT be enabled on Tx queue %u.",
+			     txq->queue_id);
+}
+
 /*********************************************************************
  *
  *  TX prep functions
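Two pieces of the new logic above are easy to misread. First, the reworked threshold selection in ice_tx_queue_setup(): tx_free_thresh is resolved first (config value or default), the default tx_rs_thresh is clamped so the pair fits in the ring, an explicit tx_rs_thresh from the config still takes precedence, and the sum is then validated against nb_desc. The standalone sketch below illustrates that ordering; the default value 32 is an assumption standing in for ICE_DEFAULT_TX_FREE_THRESH and ICE_DEFAULT_TX_RSBIT_THRESH, and pick_tx_thresholds() is a hypothetical helper, not driver code.

/*
 * A minimal sketch of the Tx threshold selection added to
 * ice_tx_queue_setup() in the diff above. The defaults below are
 * illustrative stand-ins; the real values live in ice_rxtx.h.
 */
#include <stdint.h>
#include <stdio.h>

#define DEF_TX_FREE_THRESH 32 /* stand-in for ICE_DEFAULT_TX_FREE_THRESH */
#define DEF_TX_RS_THRESH   32 /* stand-in for ICE_DEFAULT_TX_RSBIT_THRESH */

/* Returns 0 on success and fills *rs/*free_th, or -1 (the driver's
 * -EINVAL case) if the combined thresholds cannot fit in the ring. */
static int
pick_tx_thresholds(uint16_t nb_desc, uint16_t conf_rs, uint16_t conf_free,
		   uint16_t *rs, uint16_t *free_th)
{
	/* A zero in the queue config means "use the default". */
	uint16_t tx_free = conf_free ? conf_free : DEF_TX_FREE_THRESH;
	/* Clamp the default RS threshold when tx_free_thresh is so
	 * aggressive that the default would overflow the ring. */
	uint16_t tx_rs = (DEF_TX_RS_THRESH + tx_free > nb_desc) ?
			 (uint16_t)(nb_desc - tx_free) : DEF_TX_RS_THRESH;

	/* An explicit user value still wins, but is then validated. */
	if (conf_rs)
		tx_rs = conf_rs;
	if (tx_rs + tx_free > nb_desc)
		return -1;
	*rs = tx_rs;
	*free_th = tx_free;
	return 0;
}

int main(void)
{
	uint16_t rs, free_th;

	/* 64-entry ring with tx_free_thresh = 48: the default RS
	 * threshold (32) would exceed nb_desc, so it is clamped to
	 * 64 - 48 = 16. */
	if (pick_tx_thresholds(64, 0, 48, &rs, &free_th) == 0)
		printf("tx_rs_thresh=%u tx_free_thresh=%u\n", rs, free_th);
	return 0;
}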
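Second, the simple-Tx gate in ice_set_tx_function_flag() hinges on the idiom offloads == (offloads & MASK), which holds exactly when no offload bit outside MASK is set. A sketch of the test, with assumed stand-in values for DEV_TX_OFFLOAD_MBUF_FAST_FREE and ICE_TX_MAX_BURST (the real definitions live in rte_ethdev.h and ice_rxtx.h):

/*
 * A sketch of the simple-Tx eligibility test introduced in
 * ice_set_tx_function_flag() above. Mask and burst values are
 * illustrative stand-ins, not the DPDK definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define MBUF_FAST_FREE (1ULL << 16) /* stand-in for DEV_TX_OFFLOAD_MBUF_FAST_FREE */
#define TX_MAX_BURST   32           /* stand-in for ICE_TX_MAX_BURST */

static bool
tx_simple_allowed(uint64_t offloads, uint16_t tx_rs_thresh)
{
	/* True exactly when the queue requested either no offloads at
	 * all or only mbuf fast free, and the RS threshold covers at
	 * least one full burst. */
	return offloads == (offloads & MBUF_FAST_FREE) &&
	       tx_rs_thresh >= TX_MAX_BURST;
}

int main(void)
{
	/* Only fast free requested and a large enough RS threshold:
	 * the simple Tx path is allowed (exit code 0). */
	return tx_simple_allowed(MBUF_FAST_FREE, 32) ? 0 : 1;
}

Requiring tx_rs_thresh >= ICE_TX_MAX_BURST presumably lets the simple Tx path recycle descriptors at full-burst granularity; queues failing either condition fall back to the full-featured Tx function.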