X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx.h;h=bb309d38eddb2203795d5faf0a8ea96e338918bf;hb=bf38764acc5888502e8df863bc4638b4a047b0d4;hp=703c4b78d4f0f9a54d6b0040531a76ea72c50536;hpb=55d5ad6bb80f209750dc6313bee617410fcd5946;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 703c4b78d4..bb309d38ed 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -20,7 +20,7 @@
 #define HNS3_DEFAULT_TX_RS_THRESH	32
 #define HNS3_TX_FAST_FREE_AHEAD	64
 
-#define HNS3_DEFAULT_RX_BURST		32
+#define HNS3_DEFAULT_RX_BURST		64
 #if (HNS3_DEFAULT_RX_BURST > 64)
 #error "PMD HNS3: HNS3_DEFAULT_RX_BURST must <= 64\n"
 #endif
@@ -289,22 +289,14 @@ struct hns3_rx_bd_errors_stats {
 };
 
 struct hns3_rx_queue {
-	void *io_base;
 	volatile void *io_head_reg;
-	struct hns3_adapter *hns;
 	struct hns3_ptype_table *ptype_tbl;
 	struct rte_mempool *mb_pool;
 	struct hns3_desc *rx_ring;
-	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
-	const struct rte_memzone *mz;
 	struct hns3_entry *sw_ring;
-	struct rte_mbuf *pkt_first_seg;
-	struct rte_mbuf *pkt_last_seg;
 
-	uint16_t queue_id;
 	uint16_t port_id;
 	uint16_t nb_rx_desc;
-	uint16_t rx_buf_len;
 	/*
 	 * threshold for the number of BDs waited to passed to hardware. If the
 	 * number exceeds the threshold, driver will pass these BDs to hardware.
@@ -318,8 +310,6 @@ struct hns3_rx_queue {
 	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
-	bool rx_deferred_start; /* don't start this queue in dev start */
-	bool configured; /* indicate if rx queue has been configured */
 	/*
 	 * Indicate whether ignore the outer VLAN field in the Rx BD reported
 	 * by the Hardware. Because the outer VLAN is the PVID if the PVID is
@@ -331,23 +321,45 @@ struct hns3_rx_queue {
 	 * driver does not need to perform PVID-related operation in Rx. At this
 	 * point, the pvid_sw_discard_en will be false.
 	 */
-	bool pvid_sw_discard_en;
-	bool ptype_en; /* indicate if the ptype field enabled */
-	bool enabled; /* indicate if Rx queue has been enabled */
+	uint8_t pvid_sw_discard_en:1;
+	uint8_t ptype_en:1; /* indicate if the ptype field enabled */
+
+	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
+	/* offset_table: used for vector, to solve execute re-order problem */
+	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
+
+	uint16_t bulk_mbuf_num; /* indicate bulk_mbuf valid nums */
 
 	struct hns3_rx_basic_stats basic_stats;
+
+	struct rte_mbuf *pkt_first_seg;
+	struct rte_mbuf *pkt_last_seg;
+
+	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
+
 	/* DFX statistics that driver does not need to discard packets */
 	struct hns3_rx_dfx_stats dfx_stats;
 	/* Error statistics that driver needs to discard packets */
 	struct hns3_rx_bd_errors_stats err_stats;
 
-	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
-	uint16_t bulk_mbuf_num;
-
-	/* offset_table: used for vector, to solve execute re-order problem */
-	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
-	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
 	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
+
+
+	/*
+	 * The following fields are not accessed in the I/O path, so they are
+	 * placed at the end.
+	 */
+	void *io_base;
+	struct hns3_adapter *hns;
+	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
+	const struct rte_memzone *mz;
+
+	uint16_t queue_id;
+	uint16_t rx_buf_len;
+
+	bool configured; /* indicate if rx queue has been configured */
+	bool rx_deferred_start; /* don't start this queue in dev start */
+	bool enabled; /* indicate if Rx queue has been enabled */
 };
 
 struct hns3_tx_basic_stats {
@@ -407,16 +419,11 @@ struct hns3_tx_dfx_stats {
 };
 
 struct hns3_tx_queue {
-	void *io_base;
+	/* The io_tail_reg is write-only if working in tx push mode */
 	volatile void *io_tail_reg;
-	struct hns3_adapter *hns;
 	struct hns3_desc *tx_ring;
-	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
-	const struct rte_memzone *mz;
 	struct hns3_entry *sw_ring;
 
-	uint16_t queue_id;
-	uint16_t port_id;
 	uint16_t nb_tx_desc;
 	/*
 	 * index of next BD whose corresponding rte_mbuf can be released by
@@ -432,21 +439,12 @@ struct hns3_tx_queue {
 	uint16_t tx_free_thresh;
 
 	/*
-	 * For better performance in tx datapath, releasing mbuf in batches is
-	 * required.
-	 * Only checking the VLD bit of the last descriptor in a batch of the
-	 * thresh descriptors does not mean that these descriptors are all sent
-	 * by hardware successfully. So we need to check that the VLD bits of
-	 * all descriptors are cleared. and then free all mbufs in the batch.
-	 * - tx_rs_thresh
-	 *	Number of mbufs released at a time.
-	 *
-	 * - free
-	 *	Tx mbuf free array used for preserving temporarily address of mbuf
-	 *	released back to mempool, when releasing mbuf in batches.
+	 * The minimum length of the packet supported by hardware in the Tx
+	 * direction.
 	 */
-	uint16_t tx_rs_thresh;
-	struct rte_mbuf **free;
+	uint8_t min_tx_pkt_len;
+
+	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
 
 	/*
 	 * tso mode.
@@ -464,7 +462,7 @@ struct hns3_tx_queue {
 	 * checksum of packets that need TSO, so network driver software
 	 * not need to recalculate it.
 	 */
-	uint8_t tso_mode;
+	uint16_t tso_mode:1;
 	/*
 	 * udp checksum mode.
 	 * value range:
@@ -480,16 +478,10 @@ struct hns3_tx_queue {
 	 * In this mode, HW does not have the preceding problems and can
 	 * directly calculate the checksum of these UDP packets.
 	 */
-	uint8_t udp_cksum_mode;
-	/*
-	 * The minimum length of the packet supported by hardware in the Tx
-	 * direction.
-	 */
-	uint32_t min_tx_pkt_len;
+	uint16_t udp_cksum_mode:1;
 
-	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
-	bool tx_deferred_start; /* don't start this queue in dev start */
-	bool configured; /* indicate if tx queue has been configured */
+	uint16_t simple_bd_enable:1;
+	uint16_t tx_push_enable:1; /* check whether the tx push is enabled */
 	/*
 	 * Indicate whether add the vlan_tci of the mbuf to the inner VLAN field
 	 * of Tx BD. Because the outer VLAN will always be the PVID when the
@@ -502,11 +494,44 @@ struct hns3_tx_queue {
 	 * PVID-related operations in Tx. And pvid_sw_shift_en will be false at
 	 * this point.
 	 */
-	bool pvid_sw_shift_en;
-	bool enabled; /* indicate if Tx queue has been enabled */
+	uint16_t pvid_sw_shift_en:1;
+
+	/*
+	 * For better performance in tx datapath, releasing mbuf in batches is
+	 * required.
+	 * Only checking the VLD bit of the last descriptor in a batch of the
+	 * thresh descriptors does not mean that these descriptors are all sent
+	 * by hardware successfully. So we need to check that the VLD bits of
+	 * all descriptors are cleared. and then free all mbufs in the batch.
+	 * - tx_rs_thresh
+	 *	Number of mbufs released at a time.
+	 *
+	 * - free
+	 *	Tx mbuf free array used for preserving temporarily address of mbuf
+	 *	released back to mempool, when releasing mbuf in batches.
+	 */
+	uint16_t tx_rs_thresh;
+	struct rte_mbuf **free;
 
 	struct hns3_tx_basic_stats basic_stats;
 	struct hns3_tx_dfx_stats dfx_stats;
+
+
+	/*
+	 * The following fields are not accessed in the I/O path, so they are
+	 * placed at the end.
+	 */
+	void *io_base;
+	struct hns3_adapter *hns;
+	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
+	const struct rte_memzone *mz;
+
+	uint16_t port_id;
+	uint16_t queue_id;
+
+	bool configured; /* indicate if tx queue has been configured */
+	bool tx_deferred_start; /* don't start this queue in dev start */
+	bool enabled; /* indicate if Tx queue has been enabled */
 };
 
 #define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
@@ -635,8 +660,25 @@ hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
 	return ptype_tbl->l3table[l3id] | ptype_tbl->l4table[l4id];
 }
 
-void hns3_dev_rx_queue_release(void *queue);
-void hns3_dev_tx_queue_release(void *queue);
+/*
+ * If enable using Tx push feature and also device support it, then use quick
+ * doorbell (bar45) to inform the hardware.
+ *
+ * The other cases (such as: device don't support or user don't enable using)
+ * then use normal doorbell (bar23) to inform the hardware.
+ */
+static inline void
+hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)
+{
+	rte_io_wmb();
+	if (txq->tx_push_enable)
+		rte_write64_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
+	else
+		rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
+}
+
+void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
+void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
 void hns3_free_all_queues(struct rte_eth_dev *dev);
 int hns3_reset_all_tqps(struct hns3_adapter *hns);
 void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
@@ -687,6 +729,12 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
+eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev,
+				    eth_tx_prep_t *prep);
+uint16_t hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
+			       struct rte_mbuf **pkts __rte_unused,
+			       uint16_t pkts_n __rte_unused);
+
 uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
 void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
 			    uint8_t gl_idx, uint16_t gl_value);
@@ -717,5 +765,8 @@ int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
 void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
 int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+void hns3_tx_push_init(struct rte_eth_dev *dev);
+void hns3_stop_tx_datapath(struct rte_eth_dev *dev);
+void hns3_start_tx_datapath(struct rte_eth_dev *dev);
 
 #endif /* _HNS3_RXTX_H_ */
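
Note on the struct changes above: fields that the Rx/Tx fast path never touches (io_base, hns, the ring DMA address, the memzone, queue_id, and the configuration booleans) are moved behind the "not accessed in the I/O path" comment at the tail of struct hns3_rx_queue and struct hns3_tx_queue, and several bool flags become one-bit bitfields, so the hot members share fewer cache lines. The snippet below is a quick layout probe, not part of the patch; it only assumes that hns3_rxtx.h and the DPDK headers are on the include path when it is built inside the driver directory.

#include <stdio.h>
#include <stddef.h>
#include <rte_common.h>
#include "hns3_rxtx.h"

int
main(void)
{
	/* io_base is the first member moved behind the "not accessed in
	 * the I/O path" comment in both queue structures, so its offset
	 * marks where the cold (control-path) region begins. */
	printf("cache line size          : %d\n", RTE_CACHE_LINE_SIZE);
	printf("sizeof(hns3_rx_queue)    : %zu\n",
	       sizeof(struct hns3_rx_queue));
	printf("rxq cold fields start at : %zu\n",
	       offsetof(struct hns3_rx_queue, io_base));
	printf("sizeof(hns3_tx_queue)    : %zu\n",
	       sizeof(struct hns3_tx_queue));
	printf("txq cold fields start at : %zu\n",
	       offsetof(struct hns3_tx_queue, io_base));
	return 0;
}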
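The relocated comment on tx_rs_thresh and free describes the batched mbuf release scheme: a batch of tx_rs_thresh descriptors is reclaimed only once the VLD bit of every descriptor in the batch has been cleared by hardware, and the mbuf pointers are staged in the free array before being handed back to the mempool in one call. The sketch below only illustrates that flow; the real implementation lives in hns3_rxtx.c, and hns3_tx_desc_hw_done() plus the next_to_clean index are illustrative assumptions rather than names taken from this header.

#include <stdbool.h>
#include <rte_mbuf.h>
#include "hns3_rxtx.h"

/* Hypothetical predicate: true once hardware has cleared the VLD bit of
 * the given BD (the real check is done in hns3_rxtx.c). */
bool hns3_tx_desc_hw_done(const struct hns3_desc *desc);

static void
hns3_tx_free_batch_sketch(struct hns3_tx_queue *txq)
{
	uint16_t start = txq->next_to_clean; /* assumed oldest in-flight BD */
	uint16_t i, idx;

	/* Free only when every BD in the tx_rs_thresh batch is done;
	 * checking just the last BD is not sufficient, as the comment on
	 * tx_rs_thresh explains. */
	for (i = 0; i < txq->tx_rs_thresh; i++) {
		idx = (start + i) % txq->nb_tx_desc;
		if (!hns3_tx_desc_hw_done(&txq->tx_ring[idx]))
			return;
	}

	/* Stage the mbuf pointers in txq->free, then return them to the
	 * mempool with a single bulk call. */
	for (i = 0; i < txq->tx_rs_thresh; i++) {
		idx = (start + i) % txq->nb_tx_desc;
		txq->free[i] = txq->sw_ring[idx].mbuf;
		txq->sw_ring[idx].mbuf = NULL;
	}
	rte_pktmbuf_free_bulk(txq->free, txq->tx_rs_thresh);

	txq->next_to_clean = (start + txq->tx_rs_thresh) % txq->nb_tx_desc;
}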
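The hns3_write_txq_tail_reg() helper added above hides the choice between the quick doorbell (bar45, used when tx_push_enable is set) and the normal doorbell (bar23), and it issues rte_io_wmb() so descriptor writes reach memory before the MMIO store. A caller is expected to ring it once per burst rather than once per packet, roughly as in the hypothetical wrapper below; the tail value is assumed to be whatever BD index the caller advanced to, and only the helper and the tx_push_enable bit come from this header.

#include <stdint.h>
#include "hns3_rxtx.h"

static inline void
hns3_tx_commit_burst_sketch(struct hns3_tx_queue *txq, uint16_t tail)
{
	/* One doorbell per burst: hns3_write_txq_tail_reg() already
	 * orders the descriptor stores via rte_io_wmb() and selects the
	 * quick or normal doorbell based on txq->tx_push_enable. */
	hns3_write_txq_tail_reg(txq, tail);
}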