+
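+	/* The dpaax IOVA translation table is only used in physical
+	 * addressing (PA) mode, so it is released only in that case.
+	 */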
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ dpaax_iova_table_depopulate();
+
+ return ret;
+}
+
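+/* Enable unicast and multicast promiscuous mode on station interface 0 */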
+static int
+enetc_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
+
+	/* Enable promiscuous mode */
+ psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
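+/* Disable promiscuous mode on SI0; multicast promiscuity is kept if
+ * all-multicast mode is still enabled.
+ */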
+static int
+enetc_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+	/* Disable promiscuous mode for SI0 */
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
+ psipmr &= (~ENETC_PSIPMR_SET_UP(0));
+
+ if (dev->data->all_multicast == 0)
+ psipmr &= (~ENETC_PSIPMR_SET_MP(0));
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
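+/* Enable all-multicast mode by setting the multicast promiscuous bit for SI0 */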
+static int
+enetc_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
+
+	/* Enable all-multicast mode for SI0 */
+ psipmr |= ENETC_PSIPMR_SET_MP(0);
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
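+/* Disable all-multicast mode for SI0 unless promiscuous mode is active,
+ * since promiscuous mode implies receiving all multicast traffic.
+ */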
+static int
+enetc_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+ if (dev->data->promiscuous == 1)
+ return 0; /* must remain in all_multicast mode */
+
+	/* Disable all-multicast mode for SI0 */
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
+ ~(ENETC_PSIPMR_SET_MP(0));
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
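+/* Set a new MTU: validate the resulting frame size, update the
+ * jumbo-frame Rx offload flag and program the port maximum frame
+ * size registers accordingly.
+ */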
+static int
+enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+	/* Check that the MTU is within the allowed range */
+ if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
+ return -EINVAL;
+
+	/*
+	 * Refuse an MTU that would require scattered-packet support
+	 * when that feature has not been enabled.
+	 */
+ if (dev->data->min_rx_buf_size &&
+ !dev->data->scattered_rx && frame_size >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
+ return -EINVAL;
+ }
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
+	dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+	/* Set the MTU */
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
+ ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
+
+ return 0;
+}
+
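+/* Apply the requested Rx offload configuration (jumbo frames, CRC
+ * keeping and L3/L4 checksum validation) to the port registers.
+ */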
+static int
+enetc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint32_t checksum = L3_CKSUM | L4_CKSUM;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len;
+
+ max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(max_len));
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
+ ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
+ 2 * ENETC_MAC_MAXFRM_SIZE);
+ dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ int config;
+
+ config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
+ config |= ENETC_PM0_CRC;
+ enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ checksum &= ~L3_CKSUM;
+
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+ checksum &= ~L4_CKSUM;
+
+ enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
+
+ return 0;
+}
+
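+/* Start an Rx queue by setting the enable bit in its BD ring mode register */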
+static int
+enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data | ENETC_RBMR_EN;
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
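+/* Stop an Rx queue by clearing the enable bit in its BD ring mode register */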
+static int
+enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data & (~ENETC_RBMR_EN);
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
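+/* Start a Tx queue by setting the enable bit in its BD ring mode register */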
+static int
+enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data | ENETC_TBMR_EN;
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
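+/* Stop a Tx queue by clearing the enable bit in its BD ring mode register */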
+static int
+enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data & (~ENETC_TBMR_EN);
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;