X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx.h;h=bb18a0195149105b5362c3b5e206676b22fc39ea;hb=323263717774df318d8a6e64ac8bfe546e03b8f6;hp=f72fad025508c22d29f2536462ff02156bc99894;hpb=28f9002ab67ff0632a718afc2572cae723fd25fe;p=dpdk.git

diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f72fad0255..bb18a01951 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -40,6 +40,9 @@
 
 #define ICE_RXDID_COMMS_OVS	22
 
+extern uint64_t ice_timestamp_dynflag;
+extern int ice_timestamp_dynfield_offset;
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
@@ -86,8 +89,12 @@ struct ice_rx_queue {
 	bool rx_deferred_start; /* don't start this queue in dev start */
 	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
-	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+	uint32_t rxdid; /* Receive Flex Descriptor profile ID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
+	uint64_t offloads;
+	uint32_t time_high;
+	uint32_t hw_register_set;
+	const struct rte_memzone *mz;
 };
 
 struct ice_tx_entry {
@@ -132,6 +139,7 @@ struct ice_tx_queue {
 	bool tx_deferred_start; /* don't start this queue in dev start */
 	bool q_set; /* indicate if tx queue has been configured */
 	ice_tx_release_mbufs_t tx_rel_mbufs;
+	const struct rte_memzone *mz;
 };
 
 /* Offload features */
@@ -208,6 +216,8 @@ int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void ice_rx_queue_release(void *rxq);
 void ice_tx_queue_release(void *txq);
+void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void ice_free_queues(struct rte_eth_dev *dev);
 int ice_fdir_setup_tx_resources(struct ice_pf *pf);
 int ice_fdir_setup_rx_resources(struct ice_pf *pf);
@@ -221,7 +231,7 @@ uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 void ice_set_tx_function_flag(struct rte_eth_dev *dev,
 			      struct ice_tx_queue *txq);
 void ice_set_tx_function(struct rte_eth_dev *dev);
-uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t ice_rx_queue_count(void *rx_queue);
 void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 		      struct rte_eth_rxq_info *qinfo);
 void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
@@ -249,16 +259,29 @@ uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			   uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
 				uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+					uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
 					  struct rte_mbuf **rx_pkts,
 					  uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
+						  struct rte_mbuf **rx_pkts,
+						  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+					uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+					  struct rte_mbuf **rx_pkts,
+					  uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    struct rte_mbuf **rx_pkts,
 					    uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+						     struct rte_mbuf **rx_pkts,
+						     uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
@@ -270,8 +293,8 @@ int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 
 #define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
 	int i; \
-	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
-		struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+	for (i = 0; i < (ad)->pf.dev_data->nb_rx_queues; i++) { \
+		struct ice_rx_queue *rxq = (ad)->pf.dev_data->rx_queues[i]; \
 		if (!rxq) \
 			continue; \
 		rxq->fdir_enabled = on; \
@@ -297,4 +320,47 @@ void ice_fdir_rx_parsing_enable(struct ice_adapter *ad, bool on)
 	}
 }
 
+#define ICE_TIMESYNC_REG_WRAP_GUARD_BAND  10000
+
+/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
+static inline
+uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, struct ice_adapter *ad,
+				    uint32_t flag, uint32_t in_timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t hi, lo, lo2, delta;
+	uint64_t ns;
+
+	if (flag) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+
+		/*
+		 * On typical system, the delta between lo and lo2 is ~1000ns,
+		 * so 10000 seems a large-enough but not overly-big guard band.
+		 */
+		if (lo > (UINT32_MAX - ICE_TIMESYNC_REG_WRAP_GUARD_BAND))
+			lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		else
+			lo2 = lo;
+
+		if (lo2 < lo) {
+			lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+			hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+		}
+
+		ad->time_hw = ((uint64_t)hi << 32) | lo;
+	}
+
+	delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
+		ns = ad->time_hw - delta;
+	} else {
+		ns = ad->time_hw + delta;
+	}
+
+	return ns;
+}
+
 #endif /* _ICE_RXTX_H_ */
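
Editorial note (not part of the patch): below is a minimal sketch of how an Rx burst path might consume the new ice_tstamp_convert_32b_64b() helper together with the ice_timestamp_dynflag / ice_timestamp_dynfield_offset externs declared above. The descriptor field name (rxd->wb.flex_ts.ts_high), the offload flag DEV_RX_OFFLOAD_TIMESTAMP, and the example_fill_rx_timestamp() wrapper are assumptions for illustration only.

/* Illustrative sketch; assumes the ice PMD internal headers are available. */
#include <rte_byteorder.h>
#include <rte_mbuf_dyn.h>

static inline void
example_fill_rx_timestamp(struct ice_rx_queue *rxq, struct ice_hw *hw,
			  struct ice_adapter *ad, struct rte_mbuf *mb,
			  volatile union ice_rx_flex_desc *rxd)
{
	uint64_t ts_ns;

	/* rxq->offloads is the field added to struct ice_rx_queue above;
	 * the offload flag name is an assumption for this sketch. */
	if (!(rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP))
		return;

	/* Extend the 32-bit hardware timestamp to 64 bits; passing flag=1
	 * asks the helper to refresh ad->time_hw from the GLTSYN registers.
	 * The ts_high field name is an assumption for this sketch. */
	ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
			rte_le_to_cpu_32(rxd->wb.flex_ts.ts_high));

	if (ice_timestamp_dynflag > 0) {
		/* Store the nanosecond value in the registered mbuf dynamic
		 * field and set the dynamic flag so applications can tell
		 * the timestamp is valid. */
		*RTE_MBUF_DYNFIELD(mb, ice_timestamp_dynfield_offset,
				   rte_mbuf_timestamp_t *) = ts_ns;
		mb->ol_flags |= ice_timestamp_dynflag;
	}
}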