#define ICE_VPMD_RX_BURST 32
#define ICE_VPMD_TX_BURST 32
-#define ICE_RXQ_REARM_THRESH 32
+#define ICE_RXQ_REARM_THRESH 64
#define ICE_MAX_RX_BURST ICE_RXQ_REARM_THRESH
#define ICE_TX_MAX_FREE_BUF_SZ 64
#define ICE_DESCS_PER_LOOP 4
#define ICE_RXDID_COMMS_OVS 22
+extern uint64_t ice_timestamp_dynflag;
+extern int ice_timestamp_dynfield_offset;
+
typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
	bool rx_deferred_start; /* don't start this queue in dev start */
	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
-	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+	uint32_t rxdid; /* Receive Flex Descriptor profile ID */
	ice_rx_release_mbufs_t rx_rel_mbufs;
+	uint64_t offloads; /* RX offloads (RTE_ETH_RX_OFFLOAD_*) enabled on this queue */
+	uint32_t time_high;
+	uint32_t hw_register_set;
+	const struct rte_memzone *mz; /* memzone backing the RX descriptor ring */
};
struct ice_tx_entry {
	uint16_t last_id;
};
+struct ice_vec_tx_entry {
+	struct rte_mbuf *mbuf;
+};
+
struct ice_tx_queue {
	uint16_t nb_tx_desc; /* number of TX descriptors */
	rte_iova_t tx_ring_dma; /* TX ring DMA address */
	bool tx_deferred_start; /* don't start this queue in dev start */
	bool q_set; /* indicate if tx queue has been configured */
	ice_tx_release_mbufs_t tx_rel_mbufs;
+	const struct rte_memzone *mz; /* memzone backing the TX descriptor ring */
};
/* Offload features */
int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
void ice_rx_queue_release(void *rxq);
void ice_tx_queue_release(void *txq);
+void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
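For context, a minimal sketch of how these new (dev, qid)-style release callbacks can be implemented; it assumes they simply look the queue up in dev->data and forward to the existing ice_rx_queue_release()/ice_tx_queue_release() helpers declared above, which is not necessarily the driver's exact code:

```c
/* Sketch only: forward the ethdev (dev, qid) release callbacks to the
 * existing pointer-based helpers declared in this header. */
void
ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	ice_rx_queue_release(dev->data->rx_queues[qid]);
}

void
ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	ice_tx_queue_release(dev->data->tx_queues[qid]);
}
```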
void ice_free_queues(struct rte_eth_dev *dev);
int ice_fdir_setup_tx_resources(struct ice_pf *pf);
int ice_fdir_setup_rx_resources(struct ice_pf *pf);
void ice_set_tx_function_flag(struct rte_eth_dev *dev,
		struct ice_tx_queue *txq);
void ice_set_tx_function(struct rte_eth_dev *dev);
-uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t ice_rx_queue_count(void *rx_queue);
void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);
void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
+		uint32_t rxdid);
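With the per-queue rxd_to_pkt_fields function pointer replaced by the rxdid field above, the FlexiMD handler can instead be resolved per burst from a table indexed by the descriptor profile ID. The sketch below is illustrative only; the table name and the OVS handler symbol are assumptions, not confirmed driver internals:

```c
/* Illustrative only: pick the FlexiMD-to-mbuf handler by the queue's RXDID. */
static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
	[ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs, /* assumed name */
};

static inline void
ice_rxd_to_pkt_fields(struct ice_rx_queue *rxq, struct rte_mbuf *mb,
		volatile union ice_rx_flex_desc *rxdp)
{
	rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, rxdp);
}
```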
int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
		uint16_t nb_pkts);
uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
		struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
+		struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+		struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
		struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+		struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
+		struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
+int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
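ice_get_monitor_addr() backs the ethdev get_monitor_addr callback used by the power-management library. A rough usage sketch from the application side, built on the public rte_eth_get_monitor_addr() and rte_power_monitor() APIs (the wait_for_rx helper name is only for illustration):

```c
#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>

/* Sketch: arm UMWAIT on the queue's next descriptor write-back address and
 * sleep until traffic arrives or the TSC deadline expires. */
static void
wait_for_rx(uint16_t port_id, uint16_t queue_id, uint64_t tsc_deadline)
{
	struct rte_power_monitor_cond pmc;

	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
		rte_power_monitor(&pmc, tsc_deadline);
}
```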
#define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
	int i; \
-	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
-		struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+	for (i = 0; i < (ad)->pf.dev_data->nb_rx_queues; i++) { \
+		struct ice_rx_queue *rxq = (ad)->pf.dev_data->rx_queues[i]; \
		if (!rxq) \
			continue; \
		rxq->fdir_enabled = on; \
	} \
} while (0)
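This macro is meant to be flipped only when the first flow-director rule is added and when the last one is removed, e.g. guarded by a reference count on the adapter. A sketch follows; the fdir_ref_cnt field and the helper name are assumptions for illustration:

```c
/* Sketch only: enable FDIR parsing on all Rx queues for the first rule and
 * disable it again when the last rule goes away (fdir_ref_cnt is assumed). */
static void
fdir_parsing_ref(struct ice_adapter *ad, bool add)
{
	if (add) {
		if (ad->fdir_ref_cnt++ == 0)
			FDIR_PARSING_ENABLE_PER_QUEUE(ad, true);
	} else if (--ad->fdir_ref_cnt == 0) {
		FDIR_PARSING_ENABLE_PER_QUEUE(ad, false);
	}
}
```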
+#define ICE_TIMESYNC_REG_WRAP_GUARD_BAND 10000
+
+/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
+static inline
+uint64_t ice_tstamp_convert_32b_64b(struct ice_hw *hw, struct ice_adapter *ad,
+		uint32_t flag, uint32_t in_timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t hi, lo, lo2, delta;
+	uint64_t ns;
+
+	if (flag) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+
+		/*
+		 * On a typical system, the delta between lo and lo2 is ~1000 ns,
+		 * so 10000 is a large enough but not overly big guard band.
+		 */
+		if (lo > (UINT32_MAX - ICE_TIMESYNC_REG_WRAP_GUARD_BAND))
+			lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		else
+			lo2 = lo;
+
+		/* The low register wrapped between the two reads; re-sample both halves. */
+		if (lo2 < lo) {
+			lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+			hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+		}
+
+		ad->time_hw = ((uint64_t)hi << 32) | lo;
+	}
+
+	delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
+		ns = ad->time_hw - delta;
+	} else {
+		ns = ad->time_hw + delta;
+	}
+
+	return ns;
+}
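Putting the pieces together, a hedged sketch of how an Rx routine can combine the exported ice_timestamp_dynflag/ice_timestamp_dynfield_offset with this helper when RTE_ETH_RX_OFFLOAD_TIMESTAMP is enabled; the helper name and passing hw/ad explicitly are illustrative choices, not the driver's exact code:

```c
#include <rte_mbuf_dyn.h>

/* Sketch: extend the 32-bit descriptor timestamp cached in rxq->time_high
 * to 64 bits and store it in the mbuf's timestamp dynamic field. */
static inline void
ice_fill_rx_timestamp(struct ice_hw *hw, struct ice_adapter *ad,
		struct ice_rx_queue *rxq, struct rte_mbuf *mb)
{
	uint64_t ts_ns;

	if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
		return;

	/* flag = 1 forces a fresh read of the GLTSYN main timer. */
	ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
	*RTE_MBUF_DYNFIELD(mb, ice_timestamp_dynfield_offset,
			rte_mbuf_timestamp_t *) = ts_ns;
	mb->ol_flags |= ice_timestamp_dynflag;
}
```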
+
#endif /* _ICE_RXTX_H_ */