rxq->max_pkt_len = max_pkt_len;
if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
- (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
+ (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
dev_data->scattered_rx = 1;
}
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
"vector %u are mapping to all Rx queues",
hw->msix_base);
} else {
- /* If Rx interrupt is reuquired, and we can use
+ /* If Rx interrupt is required, and we can use
* multi interrupts, then the vec is from 1
*/
hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
dev_info->hash_key_size = hw->vf_res->rss_key_size;
dev_info->reta_size = hw->vf_res->rss_lut_size;
dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
dev_info->rx_offload_capa =
RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_TSO |
RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
return rte_eth_linkstatus_set(dev, &new_link);
}
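+/* An AdminQ command needs to be retried while the DCF is enabled but its
+ * state is not on (e.g. not yet recovered from a PF-triggered reset).
+ */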
+bool
+ice_dcf_adminq_need_retry(struct ice_adapter *ad)
+{
+ return ad->hw.dcf_enabled &&
+ !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+}
+
/* Add UDP tunneling port */
static int
ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
return 0;
}
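+/* Recover the DCF hardware with a full uninit/init cycle */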
+static inline void
+ice_dcf_reset_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
+{
+ ice_dcf_uninit_hw(eth_dev, hw);
+ ice_dcf_init_hw(eth_dev, hw);
+}
+
+/* Check if reset has been triggered by PF */
+static inline bool
+ice_dcf_is_reset(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct iavf_hw *hw = &ad->real_hw.avf;
+
+ return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
+ IAVF_VF_ARQLEN1_ARQENABLE_MASK);
+}
+
static int
ice_dcf_dev_reset(struct rte_eth_dev *dev)
{
+ struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &ad->real_hw;
int ret;
+ if (ice_dcf_is_reset(dev)) {
+ if (!ad->real_hw.resetting)
+ ad->real_hw.resetting = true;
+ PMD_DRV_LOG(ERR, "The DCF has been reset by PF");
+
+ /*
+ * Simply reset hw to trigger an additional DCF enable/disable
+ * cycle, which helps to work around the issue that the kernel driver
+ * may not clean up resources during the previous reset.
+ */
+ ice_dcf_reset_hw(dev, hw);
+ }
+
ret = ice_dcf_dev_uninit(dev);
if (ret)
return ret;
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
+ struct ice_adapter *parent_adapter = &adapter->parent;
- adapter->real_hw.resetting = false;
eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
+ __atomic_store_n(&parent_adapter->dcf_state_on, false,
+ __ATOMIC_RELAXED);
return -1;
}
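+ /* HW init succeeded: mark the DCF state as on */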
+ __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);