X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx.c;h=308d0a671bd416cf4638fba12f69acabaa7aba92;hb=ca5a25c5f0a8b684c9125c8ecac802d54fad2c9a;hp=b0253d5f602a952aa1e6df190bbe8f881b2a8c20;hpb=fc9b57ff576c4088138a8e0e53650b806927e683;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index b0253d5f60..308d0a671b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -536,7 +536,10 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
 		return;
 
 	addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
-	value = HNS3_GL_USEC_TO_REG(gl_value);
+	if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
+		value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
+	else
+		value = HNS3_GL_USEC_TO_REG(gl_value);
 
 	hns3_write_dev(hw, addr, value);
 }
@@ -557,6 +560,21 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
 	hns3_write_dev(hw, addr, value);
 }
 
+void
+hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
+{
+	uint32_t addr;
+
+	if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+		return;
+
+	addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	hns3_write_dev(hw, addr, ql_value);
+
+	addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	hns3_write_dev(hw, addr, ql_value);
+}
+
 static void
 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
 {
@@ -1251,6 +1269,12 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		return -EINVAL;
 	}
 
+	if (conf->rx_drop_en == 0)
+		hns3_warn(hw, "if there are no available Rx descriptors, "
+			  "incoming packets are always dropped. The input "
+			  "parameter conf->rx_drop_en(%u) is ineffective.",
+			  conf->rx_drop_en);
+
 	if (dev->data->rx_queues[idx]) {
 		hns3_rx_queue_release(dev->data->rx_queues[idx]);
 		dev->data->rx_queues[idx] = NULL;
@@ -1312,6 +1336,12 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	rxq->ol3_csum_erros = 0;
 	rxq->ol4_csum_erros = 0;
 
+	/* The CRC length set here is used to amend the received packet length */
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
+
 	rte_spinlock_lock(&hw->lock);
 	dev->data->rx_queues[idx] = rxq;
 	rte_spinlock_unlock(&hw->lock);
@@ -1578,6 +1608,23 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
 	}
 }
 
+static inline void
+recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
+		     struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
+		     uint16_t data_len)
+{
+	uint8_t crc_len = rxq->crc_len;
+
+	if (data_len <= crc_len) {
+		rte_pktmbuf_free_seg(rxm);
+		first_seg->nb_segs--;
+		last_seg->data_len = (uint16_t)(last_seg->data_len -
+						(crc_len - data_len));
+		last_seg->next = NULL;
+	} else
+		rxm->data_len = (uint16_t)(data_len - crc_len);
+}
+
 uint16_t
 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -1708,7 +1755,11 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxdp->rx.bd_base_info = 0;
 		rxdp->addr = dma_addr;
 
-		/* Load remained descriptor data and extract necessary fields */
+		/*
+		 * Load the remaining descriptor data and extract the necessary
+		 * fields. The data size from the buffer descriptor may contain
+		 * the CRC length, which must be subtracted from the packet length.
+		 */
 		data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size));
 		l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
 		ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
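The CRC handling introduced above has a subtle multi-segment case: with DEV_RX_OFFLOAD_KEEP_CRC enabled, the 4-byte CRC counted in the descriptor lengths may sit entirely in the last buffer of a scattered packet, or straddle the last two buffers. The standalone sketch below mirrors the arithmetic of recalculate_data_len(); it uses a toy segment struct in place of rte_mbuf, and trim_crc() is purely illustrative, not a driver function (the driver additionally frees the dropped segment with rte_pktmbuf_free_seg()).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for an mbuf segment; illustrative only. */
struct seg {
	uint16_t data_len;
	struct seg *next;
};

/*
 * Mirror of the driver's recalculate_data_len() arithmetic: if the last
 * segment holds nothing but (part of) the CRC, it is dropped and the CRC
 * bytes that spilled into the previous segment are trimmed there.
 */
static void
trim_crc(struct seg *last, struct seg *prev, uint8_t crc_len,
	 uint16_t *nb_segs)
{
	if (last->data_len <= crc_len) {
		/* Last segment is pure CRC (or a tail of it): drop it. */
		(*nb_segs)--;
		prev->data_len -= (uint16_t)(crc_len - last->data_len);
		prev->next = NULL;
	} else {
		last->data_len -= crc_len;
	}
}

int
main(void)
{
	struct seg a = { .data_len = 1500 };
	struct seg b = { .data_len = 2 };
	uint16_t nb_segs = 2;

	a.next = &b;
	/* CRC is 4 bytes: 2 in segment b, 2 at the end of segment a. */
	trim_crc(&b, &a, 4, &nb_segs);
	assert(nb_segs == 1 && a.data_len == 1498 && a.next == NULL);
	printf("segs=%u first=%u\n", (unsigned)nb_segs, (unsigned)a.data_len);
	return 0;
}

Note that first_seg->pkt_len is adjusted separately in hns3_recv_pkts(), so only the per-segment data_len bookkeeping is shown here.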
@@ -1729,9 +1780,31 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			continue;
 		}
 
-		/* The last buffer of the received packet */
+		/*
+		 * The last buffer of the received packet. The packet length
+		 * from the buffer descriptor may contain the CRC length and,
+		 * like the data length, must have it subtracted.
+		 */
 		pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len));
 		first_seg->pkt_len = pkt_len;
+
+		/*
+		 * This is the last buffer of the received packet. If the CRC
+		 * is not stripped by the hardware:
+		 *  - Subtract the CRC length from the total packet length.
+		 *  - If the last buffer only contains the whole CRC or a part
+		 *    of it, free the mbuf associated with the last buffer. If
+		 *    part of the CRC is also contained in the previous mbuf,
+		 *    subtract the length of that CRC part from the data length
+		 *    of the previous mbuf.
+		 */
+		rxm->next = NULL;
+		if (unlikely(rxq->crc_len > 0)) {
+			first_seg->pkt_len -= rxq->crc_len;
+			recalculate_data_len(first_seg, last_seg, rxm, rxq,
+					     data_len);
+		}
+
 		first_seg->port = rxq->port_id;
 		first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
 		first_seg->ol_flags = PKT_RX_RSS_HASH;
@@ -1740,7 +1813,6 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 				rte_le_to_cpu_32(rxd.rx.fd_id);
 			first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
 		}
-		rxm->next = NULL;
 
 		gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
 					  HNS3_RXD_GRO_SIZE_S);
@@ -1843,6 +1915,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	txq->configured = true;
 	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
 				idx * HNS3_TQP_REG_SIZE);
+	txq->min_tx_pkt_len = hw->min_tx_pkt_len;
 	txq->over_length_pkt_cnt = 0;
 	txq->exceed_limit_bd_pkt_cnt = 0;
 	txq->exceed_limit_bd_reassem_fail = 0;
@@ -2671,14 +2744,16 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		/*
-		 * If packet length is less than minimum packet size, driver
-		 * need to pad it.
+		 * If packet length is less than the minimum packet length
+		 * supported by hardware in the Tx direction, the driver needs
+		 * to pad it to avoid an error.
 		 */
-		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
+		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
+			     txq->min_tx_pkt_len)) {
 			uint16_t add_len;
 			char *appended;
 
-			add_len = HNS3_MIN_PKT_SIZE -
+			add_len = txq->min_tx_pkt_len -
 				  rte_pktmbuf_pkt_len(tx_pkt);
 			appended = rte_pktmbuf_append(tx_pkt, add_len);
 			if (appended == NULL) {
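On the Tx padding path above: hardware rejects frames shorter than min_tx_pkt_len, so the driver grows the packet via rte_pktmbuf_append(), which returns NULL when the mbuf has insufficient tailroom, hence the NULL check in the diff context. The sketch below shows just the length arithmetic with a toy buffer; append_zeros() is an illustrative stand-in for rte_pktmbuf_append(), not a DPDK API, and the 60-byte minimum is only an example value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_BUF_SIZE 128u

/* Toy packet buffer; a stand-in for rte_mbuf, illustrative only. */
struct toy_pkt {
	uint8_t data[TOY_BUF_SIZE];
	uint16_t pkt_len;
};

/* Append add_len zero bytes; return -1 when there is no tailroom left. */
static int
append_zeros(struct toy_pkt *p, uint16_t add_len)
{
	if ((uint32_t)p->pkt_len + add_len > TOY_BUF_SIZE)
		return -1;
	memset(&p->data[p->pkt_len], 0, add_len);
	p->pkt_len += add_len;
	return 0;
}

int
main(void)
{
	/* Example value: hardware requires at least a 60-byte frame. */
	const uint16_t min_tx_pkt_len = 60;
	struct toy_pkt pkt = { .pkt_len = 42 };

	if (pkt.pkt_len < min_tx_pkt_len &&
	    append_zeros(&pkt, (uint16_t)(min_tx_pkt_len - pkt.pkt_len)) != 0) {
		fprintf(stderr, "padding failed, drop the packet\n");
		return 1;
	}
	printf("tx length after padding: %u\n", (unsigned)pkt.pkt_len);
	return 0;
}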
@@ -2766,3 +2841,34 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 		eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
 	}
 }
+
+void
+hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		  struct rte_eth_rxq_info *qinfo)
+{
+	struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+
+	/*
+	 * If there are no available Rx buffer descriptors, incoming packets
+	 * are always dropped by the hardware of the hns3 network engine.
+	 */
+	qinfo->conf.rx_drop_en = 1;
+	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		  struct rte_eth_txq_info *qinfo)
+{
+	struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
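hns3_rxq_info_get() and hns3_txq_info_get() are the per-driver callbacks behind the ethdev queue-introspection API, presumably wired into hns3's eth_dev_ops elsewhere in this commit. A minimal usage sketch from the application side, assuming an already configured and started port (error handling abbreviated):

#include <stdio.h>
#include <rte_ethdev.h>

/* Query queue 0 of an already configured port and print a few fields. */
static void
dump_queue_info(uint16_t port_id)
{
	struct rte_eth_rxq_info rxq_info;
	struct rte_eth_txq_info txq_info;

	if (rte_eth_rx_queue_info_get(port_id, 0, &rxq_info) == 0) {
		/* hns3 always reports rx_drop_en = 1, as explained above. */
		printf("rx: %u descriptors, rx_drop_en=%u\n",
		       (unsigned)rxq_info.nb_desc,
		       (unsigned)rxq_info.conf.rx_drop_en);
	}

	if (rte_eth_tx_queue_info_get(port_id, 0, &txq_info) == 0) {
		printf("tx: %u descriptors, deferred_start=%u\n",
		       (unsigned)txq_info.nb_desc,
		       (unsigned)txq_info.conf.tx_deferred_start);
	}
}

Because of the hardware behaviour documented in the comment above, the reported rx_drop_en is always 1 regardless of what was passed at queue setup time.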