diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index eef76ffdc5..bb18a01951 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,10 +89,12 @@ struct ice_rx_queue {
 	bool rx_deferred_start; /* don't start this queue in dev start */
 	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
-	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+	uint32_t rxdid; /* Receive Flex Descriptor profile ID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	uint64_t offloads;
 	uint32_t time_high;
+	uint32_t hw_register_set;
+	const struct rte_memzone *mz;
 };
 
 struct ice_tx_entry {
@@ -137,6 +139,7 @@ struct ice_tx_queue {
 	bool tx_deferred_start; /* don't start this queue in dev start */
 	bool q_set; /* indicate if tx queue has been configured */
 	ice_tx_release_mbufs_t tx_rel_mbufs;
+	const struct rte_memzone *mz;
 };
 
 /* Offload features */
@@ -213,6 +216,8 @@ int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void ice_rx_queue_release(void *rxq);
 void ice_tx_queue_release(void *txq);
+void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void ice_free_queues(struct rte_eth_dev *dev);
 int ice_fdir_setup_tx_resources(struct ice_pf *pf);
 int ice_fdir_setup_rx_resources(struct ice_pf *pf);
@@ -226,7 +231,7 @@ uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 void ice_set_tx_function_flag(struct rte_eth_dev *dev,
 			      struct ice_tx_queue *txq);
 void ice_set_tx_function(struct rte_eth_dev *dev);
-uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t ice_rx_queue_count(void *rx_queue);
 void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 		      struct rte_eth_rxq_info *qinfo);
 void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
@@ -315,31 +320,44 @@ void ice_fdir_rx_parsing_enable(struct ice_adapter *ad, bool on)
 	}
 }
 
+#define ICE_TIMESYNC_REG_WRAP_GUARD_BAND 10000
+
 /* Helper function to convert a 32b nanoseconds timestamp to 64b. */
 static inline
-uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, uint32_t in_timestamp)
+uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, struct ice_adapter *ad,
+				    uint32_t flag, uint32_t in_timestamp)
 {
 	const uint64_t mask = 0xFFFFFFFF;
 	uint32_t hi, lo, lo2, delta;
-	uint64_t time, ns;
-
-	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
-	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
-	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+	uint64_t ns;
 
-	if (lo2 < lo) {
+	if (flag) {
 		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
 		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
-	}
-
-	time = ((uint64_t)hi << 32) | lo;
+
+		/*
+		 * On typical system, the delta between lo and lo2 is ~1000ns,
+		 * so 10000 seems a large-enough but not overly-big guard band.
+		 */
+		if (lo > (UINT32_MAX - ICE_TIMESYNC_REG_WRAP_GUARD_BAND))
+			lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		else
+			lo2 = lo;
+
+		if (lo2 < lo) {
+			lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+			hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+		}
+
+		ad->time_hw = ((uint64_t)hi << 32) | lo;
+	}
 
-	delta = (in_timestamp - (uint32_t)(time & mask));
+	delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
 	if (delta > (mask / 2)) {
-		delta = ((uint32_t)(time & mask) - in_timestamp);
-		ns = time - delta;
+		delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
+		ns = ad->time_hw - delta;
 	} else {
-		ns = time + delta;
+		ns = ad->time_hw + delta;
 	}
 
 	return ns;
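
Note on the timestamp math in the last hunk: the reworked helper caches the full 64-bit PHC time in ad->time_hw and extends each 32-bit packet stamp against it; the delta test picks whichever 64-bit value with those low 32 bits lies nearest the cached time. The standalone model below is illustrative only — extend_32b_to_64b and the sample values are invented for this sketch, and the register reads are omitted — but it reproduces the arithmetic of the hunk above:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Standalone model of the extension arithmetic in
     * ice_tstamp_convert_32b_64b(): given the cached 64-bit PHC time and
     * a 32-bit packet timestamp, return the 64-bit value whose low 32
     * bits equal the stamp and which lies nearest the cached time.
     */
    static uint64_t extend_32b_to_64b(uint64_t time_hw, uint32_t in_timestamp)
    {
        const uint64_t mask = 0xFFFFFFFF;
        uint32_t delta = in_timestamp - (uint32_t)(time_hw & mask);

        if (delta > (mask / 2)) {
            /* Stamp is behind the cached time (possibly from the
             * previous 32-bit epoch): walk backwards, not forwards. */
            delta = (uint32_t)(time_hw & mask) - in_timestamp;
            return time_hw - delta;
        }
        return time_hw + delta;
    }

    int main(void)
    {
        /* Cached time just after a low-word wrap: hi = 1, lo = 0x100. */
        uint64_t time_hw = 0x100000100ULL;

        /* A stamp taken just before the wrap resolves into the previous
         * epoch: prints 0xffffff00, not 0x1ffffff00. */
        printf("%#llx\n",
               (unsigned long long)extend_32b_to_64b(time_hw, 0xFFFFFF00u));

        /* A stamp taken just after the wrap stays in the current epoch:
         * prints 0x100000180. */
        printf("%#llx\n",
               (unsigned long long)extend_32b_to_64b(time_hw, 0x180u));
        return 0;
    }

The ICE_TIMESYNC_REG_WRAP_GUARD_BAND check in the patch serves a different purpose: GLTSYN_TIME_L(0) is only read a second time when the first read lands within 10000 ns of the 32-bit wrap point, which saves one MMIO read per refresh in the common case.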
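
The prototype changes in the earlier hunks track the ethdev driver-callback rework in DPDK 21.11, where the queue-release and Rx-queue-count callbacks stopped taking a bare void * / (dev, queue id) pair and moved to (dev, qid) and a direct queue pointer, respectively. A plausible shape for the two new release wrappers, assuming they simply look up the queue by index and delegate to the existing void * helpers declared above (the real bodies live in ice_rxtx.c, which this header diff does not show):

    #include <rte_ethdev.h>
    #include "ice_rxtx.h"

    void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
    {
        /* Resolve the queue index against the port, then reuse the
         * pre-existing void * release helper. */
        ice_rx_queue_release(dev->data->rx_queues[qid]);
    }

    void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
    {
        ice_tx_queue_release(dev->data->tx_queues[qid]);
    }

Keeping the old void * helpers alongside the new wrappers lets internal callers (e.g. ice_free_queues) continue releasing queues directly, while the ethdev ops table points at the (dev, qid) variants.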