+static uint16_t
+ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **bufs,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
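+/* Configure the buffer and maximum packet lengths of an Rx queue from the
+ * mempool and MTU, then program the queue tail register.
+ */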
+static int
+ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
+{
+ struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct iavf_hw *hw = &dcf_ad->real_hw.avf;
+ uint16_t buf_size, max_pkt_len;
+
+ buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+ rxq->rx_hdr_len = 0;
+ rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+ max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+ dev->data->mtu + ICE_ETH_OVERHEAD);
+
+ /* Check that the maximum packet length is set correctly. */
+ if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
+ max_pkt_len > ICE_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u",
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)ICE_FRAME_SIZE_MAX);
+ return -EINVAL;
+ }
+
+ rxq->max_pkt_len = max_pkt_len;
+ if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
+ (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+ rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
+ IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ IAVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
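+/* Initialize all configured Rx queues and select the Rx/Tx burst functions. */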
+static int
+ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
+{
+ struct ice_rx_queue **rxq =
+ (struct ice_rx_queue **)dev->data->rx_queues;
+ int i, ret;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!rxq[i] || !rxq[i]->q_set)
+ continue;
+ ret = ice_dcf_init_rxq(dev, rxq[i]);
+ if (ret)
+ return ret;
+ }
+
+ ice_set_rx_function(dev);
+ ice_set_tx_function(dev);
+
+ return 0;
+}
+
+#define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+#define IAVF_ITR_INDEX_DEFAULT 0
+#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
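+/* Convert an ITR interval in microseconds to the hardware register value;
+ * out-of-range values fall back to the default interval.
+ */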
+static inline uint16_t
+iavf_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
+ interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+ /* Convert to hardware count; each unit written represents 2 us */
+ return interval / 2;
+}
+
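+/* Map Rx queues to MSI-X vectors. Three cases are handled: Rx interrupts
+ * disabled (descriptor write-back only), a single vector shared by all
+ * queues, and multiple vectors distributed across the queues round-robin.
+ */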
+static int
+ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &adapter->real_hw;
+ uint16_t interval, i;
+ int vec;
+
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+ }
+
+ if (!dev->data->dev_conf.intr_conf.rxq ||
+ !rte_intr_dp_is_en(intr_handle)) {
+ /* Rx interrupt is disabled; map the interrupt only for write-back */
+ hw->nb_msix = 1;
+ if (hw->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+ /* If WB_ON_ITR is supported, enable it */
+ hw->msix_base = IAVF_RX_VEC_START;
+ /* Set the ITR for index zero to 2 us, to make sure
+ * that we leave time for aggregation to occur, but
+ * don't increase latency dramatically.
+ */
+ IAVF_WRITE_REG(&hw->avf,
+ IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
+ (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
+ (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
+ } else {
+ /* Without the WB_ON_ITR capability, an interrupt
+ * must be set for descriptor write-back.
+ */
+ hw->msix_base = IAVF_MISC_VEC_ID;
+
+ /* set ITR to max */
+ interval =
+ iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
+ IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
+ IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+ (IAVF_ITR_INDEX_DEFAULT <<
+ IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+ (interval <<
+ IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+ }
+ IAVF_WRITE_FLUSH(&hw->avf);
+ /* map all queues to the same interrupt */
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ hw->rxq_map[hw->msix_base] |= 1 << i;
+ } else {
+ if (!rte_intr_allow_others(intr_handle)) {
+ hw->nb_msix = 1;
+ hw->msix_base = IAVF_MISC_VEC_ID;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ hw->rxq_map[hw->msix_base] |= 1 << i;
+ rte_intr_vec_list_index_set(intr_handle,
+ i, IAVF_MISC_VEC_ID);
+ }
+ PMD_DRV_LOG(DEBUG,
+ "vector %u are mapping to all Rx queues",
+ hw->msix_base);
+ } else {
+ /* If Rx interrupts are required and multiple
+ * interrupt vectors are available, the queue
+ * vectors start from IAVF_RX_VEC_START (1).
+ */
+ hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
+ rte_intr_nb_efd_get(intr_handle));
+ hw->msix_base = IAVF_MISC_VEC_ID;
+ vec = IAVF_RX_VEC_START;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ hw->rxq_map[vec] |= 1 << i;
+ rte_intr_vec_list_index_set(intr_handle,
+ i, vec++);
+ if (vec >= hw->nb_msix)
+ vec = IAVF_RX_VEC_START;
+ }
+ PMD_DRV_LOG(DEBUG,
+ "%u vectors are mapping to %u Rx queues",
+ hw->nb_msix, dev->data->nb_rx_queues);
+ }
+ }
+
+ if (ice_dcf_config_irq_map(hw)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+ return -1;
+ }
+ return 0;
+}
+
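+/* Allocate and initialize one mbuf for each descriptor of the Rx ring. */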
+static int
+alloc_rxq_mbufs(struct ice_rx_queue *rxq)
+{
+ volatile union ice_rx_flex_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &rxq->rx_ring[i];
+ rxd->read.pkt_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ rxd->read.rsvd1 = 0;
+ rxd->read.rsvd2 = 0;
+#endif
+
+ rxq->sw_ring[i].mbuf = (void *)mbuf;
+ }
+
+ return 0;
+}
+
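+/* Start an Rx queue: fill the ring with mbufs, program the tail register
+ * and switch the queue on through virtchnl.
+ */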
+static int
+ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct iavf_hw *hw = &ad->real_hw.avf;
+ struct ice_rx_queue *rxq;
+ int err = 0;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = alloc_rxq_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ IAVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
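+/* Zero the Rx descriptor ring and reset the software state of the queue. */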
+static inline void
+reset_rx_queue(struct ice_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+ for (i = 0; i < ICE_RX_MAX_BURST; i++)
+ rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+
+ /* for rx bulk */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
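+/* Zero the Tx ring, mark every descriptor as done and relink the software
+ * ring entries.
+ */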
+static inline void
+reset_tx_queue(struct ice_tx_queue *txq)
+{
+ struct ice_tx_entry *txe;
+ uint32_t i, size;
+ uint16_t prev;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i].cmd_type_offset_bsz =
+ rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+
+ txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+ txq->nb_tx_free = txq->nb_tx_desc - 1;
+
+ txq->tx_next_dd = txq->tx_rs_thresh - 1;
+ txq->tx_next_rs = txq->tx_rs_thresh - 1;
+}
+
+static int
+ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &ad->real_hw;
+ struct ice_rx_queue *rxq;
+ int err;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxq->rx_rel_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct iavf_hw *hw = &ad->real_hw.avf;
+ struct ice_tx_queue *txq;
+ int err = 0;
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Init the TX tail register. */
+ txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
+ IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IAVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
+ }
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &ad->real_hw;
+ struct ice_tx_queue *txq;
+ int err;
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ txq->tx_rel_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
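+/* Start all Tx queues first, then all Rx queues, skipping queues marked
+ * for deferred start; roll back the started queues on failure.
+ */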
+static int
+ice_dcf_start_queues(struct rte_eth_dev *dev)
+{
+ struct ice_rx_queue *rxq;
+ struct ice_tx_queue *txq;
+ int nb_rxq = 0;
+ int nb_txq, i;
+
+ for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
+ txq = dev->data->tx_queues[nb_txq];
+ if (txq->tx_deferred_start)
+ continue;
+ if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
+ goto tx_err;
+ }
+ }
+
+ for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
+ rxq = dev->data->rx_queues[nb_rxq];
+ if (rxq->rx_deferred_start)
+ continue;
+ if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
+ goto rx_err;
+ }
+ }
+
+ return 0;
+
+ /* Stop the already-started queues if starting all of them failed */
+rx_err:
+ for (i = 0; i < nb_rxq; i++)
+ ice_dcf_rx_queue_stop(dev, i);
+tx_err:
+ for (i = 0; i < nb_txq; i++)
+ ice_dcf_tx_queue_stop(dev, i);
+
+ return -1;
+}
+
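+/* Device start: initialize the queues, RSS and interrupt mapping, switch
+ * the queues on and restore the MAC filters.
+ */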
+static int
+ice_dcf_dev_start(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+ struct ice_adapter *ad = &dcf_ad->parent;
+ struct ice_dcf_hw *hw = &dcf_ad->real_hw;
+ int ret;
+
+ if (hw->resetting) {
+ PMD_DRV_LOG(ERR,
+ "The DCF has been reset by PF, please reinit first");
+ return -EIO;
+ }
+
+ if (hw->tm_conf.root && !hw->tm_conf.committed) {
+ PMD_DRV_LOG(ERR,
+ "please call hierarchy_commit() before starting the port");
+ return -EIO;
+ }
+
+ ad->pf.adapter_stopped = 0;
+
+ hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ ret = ice_dcf_init_rx_queues(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Fail to init queues");
+ return ret;
+ }
+
+ if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ ret = ice_dcf_init_rss(hw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to configure RSS");
+ return ret;
+ }
+ }
+
+ ret = ice_dcf_configure_queues(hw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Fail to config queues");
+ return ret;
+ }
+
+ ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
+ return ret;
+ }
+
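+ /* Disable and re-enable the interrupt to apply the new vector setup */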
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ rte_intr_disable(intr_handle);
+ rte_intr_enable(intr_handle);
+ }
+
+ ret = ice_dcf_start_queues(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to enable queues");
+ return ret;
+ }
+
+ ret = ice_dcf_add_del_all_mac_addr(hw, hw->eth_dev->data->mac_addrs,
+ true, VIRTCHNL_ETHER_ADDR_PRIMARY);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to add mac addr");
+ return ret;
+ }
+
+ if (dcf_ad->mc_addrs_num) {
+ /* re-add previously configured multicast addresses */
+ ret = dcf_add_del_mc_addr_list(hw, dcf_ad->mc_addrs,
+ dcf_ad->mc_addrs_num, true);
+ if (ret)
+ return ret;
+ }
+
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
+
+ return 0;
+}
+
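+/* Disable all queues through virtchnl, then release the queued mbufs and
+ * reset the per-queue software state.
+ */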
+static void
+ice_dcf_stop_queues(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *ad = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &ad->real_hw;
+ struct ice_rx_queue *rxq;
+ struct ice_tx_queue *txq;
+ int ret, i;
+
+ /* Stop all queues */
+ ret = ice_dcf_disable_queues(hw);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues");
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ txq->tx_rel_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ rxq->rx_rel_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+}
+
+static int
+ice_dcf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+ struct ice_adapter *ad = &dcf_ad->parent;
+ struct ice_dcf_hw *hw = &dcf_ad->real_hw;
+
+ if (ad->pf.adapter_stopped == 1) {
+ PMD_DRV_LOG(DEBUG, "Port is already stopped");
+ return 0;
+ }
+
+ /* Stop the VF representors for this device */
+ ice_dcf_vf_repr_stop_all(dcf_ad);
+
+ ice_dcf_stop_queues(dev);
+
+ rte_intr_efd_disable(intr_handle);
+ rte_intr_vec_list_free(intr_handle);
+
+ ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw,
+ dcf_ad->real_hw.eth_dev->data->mac_addrs,
+ false, VIRTCHNL_ETHER_ADDR_PRIMARY);
+
+ if (dcf_ad->mc_addrs_num)
+ /* flush previous addresses */
+ (void)dcf_add_del_mc_addr_list(&dcf_ad->real_hw,
+ dcf_ad->mc_addrs,
+ dcf_ad->mc_addrs_num, false);
+
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+ ad->pf.adapter_stopped = 1;
+ hw->tm_conf.committed = false;
+
+ return 0;
+}
+
+static int
+ice_dcf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+ struct ice_adapter *ad = &dcf_ad->parent;
+
+ ad->rx_bulk_alloc_allowed = true;
+ ad->tx_simple_allowed = true;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+ return 0;
+}
+
+static int
+ice_dcf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &adapter->real_hw;
+
+ dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX;
+ dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = hw->vf_res->rss_key_size;
+ dev_info->reta_size = hw->vf_res->rss_lut_size;
+ dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+
+ dev_info->rx_offload_capa =
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ dev_info->tx_offload_capa =
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = ICE_DEFAULT_RX_PTHRESH,
+ .hthresh = ICE_DEFAULT_RX_HTHRESH,
+ .wthresh = ICE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = ICE_DEFAULT_TX_PTHRESH,
+ .hthresh = ICE_DEFAULT_TX_HTHRESH,
+ .wthresh = ICE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ return 0;
+}
+
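+/* Set unicast/multicast promiscuous mode with a single virtchnl command. */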
+static int
+dcf_config_promisc(struct ice_dcf_adapter *adapter,
+ bool enable_unicast,
+ bool enable_multicast)
+{
+ struct ice_dcf_hw *hw = &adapter->real_hw;
+ struct virtchnl_promisc_info promisc;
+ struct dcf_virtchnl_cmd args;
+ int err;
+
+ promisc.flags = 0;
+ promisc.vsi_id = hw->vsi_res->vsi_id;
+
+ if (enable_unicast)
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
+
+ if (enable_multicast)
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
+
+ memset(&args, 0, sizeof(args));
+ args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.req_msg = (uint8_t *)&promisc;
+ args.req_msglen = sizeof(promisc);
+
+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "fail to execute command VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE");
+ return err;
+ }
+
+ adapter->promisc_unicast_enabled = enable_unicast;
+ adapter->promisc_multicast_enabled = enable_multicast;
+ return 0;
+}
+
+static int
+ice_dcf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+
+ if (adapter->promisc_unicast_enabled) {
+ PMD_DRV_LOG(INFO, "promiscuous has been enabled");
+ return 0;
+ }
+
+ return dcf_config_promisc(adapter, true,
+ adapter->promisc_multicast_enabled);
+}
+
+static int
+ice_dcf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+
+ if (!adapter->promisc_unicast_enabled) {
+ PMD_DRV_LOG(INFO, "promiscuous has been disabled");
+ return 0;
+ }
+
+ return dcf_config_promisc(adapter, false,
+ adapter->promisc_multicast_enabled);
+}
+
+static int
+ice_dcf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+
+ if (adapter->promisc_multicast_enabled) {
+ PMD_DRV_LOG(INFO, "allmulticast has been enabled");
+ return 0;
+ }
+
+ return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
+ true);
+}
+
+static int
+ice_dcf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+
+ if (!adapter->promisc_multicast_enabled) {
+ PMD_DRV_LOG(INFO, "allmulticast has been disabled");
+ return 0;
+ }
+
+ return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
+ false);
+}
+
+static int
+dcf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ int err;
+
+ if (rte_is_zero_ether_addr(addr)) {
+ PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
+ return -EINVAL;
+ }
+
+ err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, true,
+ VIRTCHNL_ETHER_ADDR_EXTRA);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to add MAC address");
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+dcf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct rte_ether_addr *addr = &dev->data->mac_addrs[index];
+ int err;
+
+ err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, false,
+ VIRTCHNL_ETHER_ADDR_EXTRA);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to remove MAC address");
+}
+
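+/* Add or delete a whole list of multicast addresses in one virtchnl command. */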
+static int
+dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
+ struct rte_ether_addr *mc_addrs,
+ uint32_t mc_addrs_num, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct dcf_virtchnl_cmd args;
+ uint32_t i;
+ int len, err = 0;
+
+ len = sizeof(struct virtchnl_ether_addr_list);
+ len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num;
+
+ list = rte_zmalloc(NULL, len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
+ sizeof(list->list[i].addr));
+ list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA;
+ }
+
+ list->vsi_id = hw->vsi_res->vsi_id;
+ list->num_elements = mc_addrs_num;
+
+ memset(&args, 0, sizeof(args));
+ args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.req_msg = (uint8_t *)list;
+ args.req_msglen = len;
+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETHER_ADDRESS" :
+ "OP_DEL_ETHER_ADDRESS");
+ rte_free(list);
+ return err;
+}
+
+static int
+dcf_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &adapter->real_hw;
+ uint32_t i;
+ int ret;
+
+ if (mc_addrs_num > DCF_NUM_MACADDR_MAX) {
+ PMD_DRV_LOG(ERR,
+ "can't add more than a limited number (%u) of addresses.",
+ (uint32_t)DCF_NUM_MACADDR_MAX);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
+ const uint8_t *mac = mc_addrs[i].addr_bytes;
+
+ PMD_DRV_LOG(ERR,
+ "Invalid mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ mac[0], mac[1], mac[2], mac[3], mac[4],
+ mac[5]);
+ return -EINVAL;
+ }
+ }
+
+ if (adapter->mc_addrs_num) {
+ /* flush previous addresses */
+ ret = dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
+ adapter->mc_addrs_num, false);
+ if (ret)
+ return ret;
+ }
+ if (!mc_addrs_num) {
+ adapter->mc_addrs_num = 0;
+ return 0;
+ }
+
+ /* add new ones */
+ ret = dcf_add_del_mc_addr_list(hw, mc_addrs, mc_addrs_num, true);
+ if (ret) {
+ /* If adding the new address list fails, restore
+ * the previous addresses.
+ */
+ if (adapter->mc_addrs_num)
+ (void)dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
+ adapter->mc_addrs_num,
+ true);
+ return ret;
+ }
+ adapter->mc_addrs_num = mc_addrs_num;
+ memcpy(adapter->mc_addrs,
+ mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
+
+ return 0;
+}
+
+static int
+dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &adapter->real_hw;
+ struct rte_ether_addr *old_addr;
+ int ret;
+
+ old_addr = hw->eth_dev->data->mac_addrs;
+ if (rte_is_same_ether_addr(old_addr, mac_addr))
+ return 0;
+
+ ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, old_addr, false,
+ VIRTCHNL_ETHER_ADDR_PRIMARY);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ old_addr->addr_bytes[0],
+ old_addr->addr_bytes[1],
+ old_addr->addr_bytes[2],
+ old_addr->addr_bytes[3],
+ old_addr->addr_bytes[4],
+ old_addr->addr_bytes[5]);
+
+ ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, mac_addr, true,
+ VIRTCHNL_ETHER_ADDR_PRIMARY);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to add new MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5]);
+
+ if (ret)
+ return -EIO;
+
+ rte_ether_addr_copy(mac_addr, hw->eth_dev->data->mac_addrs);
+ return 0;
+}
+
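+/* Add or delete a VLAN filter through the VLAN V2 virtchnl interface,
+ * preferring outer-tag filtering when the hardware supports it.
+ */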
+static int
+dcf_add_del_vlan_v2(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps =
+ &hw->vlan_v2_caps.filtering.filtering_support;
+ struct virtchnl_vlan *vlan_setting;
+ struct virtchnl_vlan_filter_list_v2 vlan_filter;
+ struct dcf_virtchnl_cmd args;
+ uint32_t filtering_caps;
+ int err;
+
+ if (supported_caps->outer) {
+ filtering_caps = supported_caps->outer;
+ vlan_setting = &vlan_filter.filters[0].outer;
+ } else {
+ filtering_caps = supported_caps->inner;
+ vlan_setting = &vlan_filter.filters[0].inner;
+ }
+
+ if (!(filtering_caps & VIRTCHNL_VLAN_ETHERTYPE_8100))
+ return -ENOTSUP;
+
+ memset(&vlan_filter, 0, sizeof(vlan_filter));
+ vlan_filter.vport_id = hw->vsi_res->vsi_id;
+ vlan_filter.num_elements = 1;
+ vlan_setting->tpid = RTE_ETHER_TYPE_VLAN;
+ vlan_setting->tci = vlanid;
+
+ memset(&args, 0, sizeof(args));
+ args.v_op = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2;
+ args.req_msg = (uint8_t *)&vlan_filter;
+ args.req_msglen = sizeof(vlan_filter);
+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2");
+
+ return err;
+}
+
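+/* Add or delete a single VLAN filter through the legacy virtchnl interface. */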
+static int
+dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
+{
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ struct dcf_virtchnl_cmd args;
+ int err;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = hw->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ memset(&args, 0, sizeof(args));
+ args.v_op = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
+ args.req_msg = cmd_buffer;
+ args.req_msglen = sizeof(cmd_buffer);
+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");
+
+ return err;
+}
+
+static int
+dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ice_dcf_adapter *adapter = dev->data->dev_private;
+ struct ice_dcf_hw *hw = &adapter->real_hw;
+ int err;
+
+ if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ err = dcf_add_del_vlan_v2(hw, vlan_id, on);
+ if (err)
+ return -EIO;
+ return 0;
+ }
+
+ if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ err = dcf_add_del_vlan(hw, vlan_id, on);
+ if (err)
+ return -EIO;
+ return 0;
+}
+
+static void
+dcf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)