.tx_queue_release = ice_tx_queue_release,
.dev_infos_get = ice_dev_info_get,
.link_update = ice_link_update,
+ .rxq_info_get = ice_rxq_info_get,
+ .txq_info_get = ice_txq_info_get,
+ .rx_queue_count = ice_rx_queue_count,
};
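With these three callbacks registered, applications reach the new code through the generic ethdev API. A minimal sketch of that path, assuming a hypothetical helper name dump_queue_info() and using queue 0 as a placeholder (error handling trimmed); only the rte_eth_*_queue_info_get() calls are real DPDK API:

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: print basic info for Rx/Tx queue 0 of a port. */
static void
dump_queue_info(uint16_t port_id)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	/* Dispatches to ice_rxq_info_get() via dev_ops->rxq_info_get. */
	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_qinfo) == 0)
		printf("rxq0: %u descriptors, free_thresh %u\n",
		       rx_qinfo.nb_desc, rx_qinfo.conf.rx_free_thresh);

	/* Dispatches to ice_txq_info_get() via dev_ops->txq_info_get. */
	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_qinfo) == 0)
		printf("txq0: %u descriptors, rs_thresh %u\n",
		       tx_qinfo.nb_desc, tx_qinfo.conf.tx_rs_thresh);
}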
static void
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_FILTER;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->rx_offload_capa = 0;
+ dev_info->tx_offload_capa = 0;
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_queue_offload_capa = 0;
dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
- dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
-
- dev_info->default_rxconf = (struct rte_eth_rxconf) {
- .rx_thresh = {
- .pthresh = ICE_DEFAULT_RX_PTHRESH,
- .hthresh = ICE_DEFAULT_RX_HTHRESH,
- .wthresh = ICE_DEFAULT_RX_WTHRESH,
- },
- .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
- .rx_drop_en = 0,
- .offloads = 0,
- };
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .tx_thresh = {
- .pthresh = ICE_DEFAULT_TX_PTHRESH,
- .hthresh = ICE_DEFAULT_TX_HTHRESH,
- .wthresh = ICE_DEFAULT_TX_WTHRESH,
- },
- .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
- .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
- .offloads = 0,
- };
-
- dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
- .nb_max = ICE_MAX_RING_DESC,
- .nb_min = ICE_MIN_RING_DESC,
- .nb_align = ICE_ALIGN_RING_DESC,
- };
-
- dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
- .nb_max = ICE_MAX_RING_DESC,
- .nb_min = ICE_MIN_RING_DESC,
- .nb_align = ICE_ALIGN_RING_DESC,
- };
dev_info->speed_capa = ETH_LINK_SPEED_10M |
ETH_LINK_SPEED_100M |
rte_free(q);
}
+void
+ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct ice_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct ice_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+uint32_t
+ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define ICE_RXQ_SCAN_INTERVAL 4
+ volatile union ice_rx_desc *rxdp;
+ struct ice_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ while ((desc < rxq->nb_rx_desc) &&
+ ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
+ (1 << ICE_RX_DESC_STATUS_DD_S)) {
+ /**
+ * Check the DD bit of one Rx descriptor in each group of 4, to
+ * avoid polling the ring too frequently and degrading performance
+ * too much.
+ */
+ desc += ICE_RXQ_SCAN_INTERVAL;
+ rxdp += ICE_RXQ_SCAN_INTERVAL;
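+ /* Wrap back to the head of the ring once the scan runs past the end. */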
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
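For completeness, a hedged sketch of how the returned descriptor count could be consumed by an application; rte_eth_rx_queue_count() forwards to ice_rx_queue_count() through the dev_ops table. The helper name rxq_backlog_high() and the half-ring threshold are illustrative assumptions, not part of this patch:

#include <rte_ethdev.h>

/* Hypothetical watermark check: returns 1 when more than half of the
 * ring holds completed (DD-set) descriptors not yet polled by the app. */
static int
rxq_backlog_high(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used < 0)	/* negative errno on bad port/queue or missing op */
		return used;
	return used > nb_desc / 2;
}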
+
void
ice_clear_queues(struct rte_eth_dev *dev)
{