+static int
+enetc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+
+ /* Total received packets, bad + good. To count only good received and
+ * transmitted frames, use the ENETC_PM0_RFRM and ENETC_PM0_TFRM
+ * registers instead.
+ */
+ stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
+ stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
+ stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
+ stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
+ /* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
+ * truncated packets.
+ */
+ stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
+ stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
+ stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);
+
+ return 0;
+}
+
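+/* Clear all port MAC counters by writing the clear-statistics command to
+ * the MAC statistics configuration register.
+ */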
+static int
+enetc_stats_reset(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+
+ enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
+
+ return 0;
+}
+
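+/* Stop the port and release every configured Rx and Tx queue. */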
+static void
+enetc_dev_close(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ enetc_dev_stop(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ enetc_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ enetc_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
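+/* Promiscuous and allmulticast filtering are controlled through the
+ * ENETC_PSIPMR port register; the helpers below set or clear the
+ * unicast/multicast promiscuous bits for SI0.
+ */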
+static int
+enetc_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
+
+ /* Enable unicast and multicast promiscuous mode for SI0 */
+ psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
+static int
+enetc_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+ /* Disable promiscuous mode for SI0 */
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
+ psipmr &= (~ENETC_PSIPMR_SET_UP(0));
+
+ if (dev->data->all_multicast == 0)
+ psipmr &= (~ENETC_PSIPMR_SET_MP(0));
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
+static int
+enetc_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
+
+ /* Enable allmulticast mode for SI0 */
+ psipmr |= ENETC_PSIPMR_SET_MP(0);
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
+static int
+enetc_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t psipmr = 0;
+
+ if (dev->data->promiscuous == 1)
+ return 0; /* must remain in all_multicast mode */
+
+ /* Disable allmulticast mode for SI0 */
+ psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
+ ~(ENETC_PSIPMR_SET_MP(0));
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
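+/* Update the maximum frame size for the port; the new MTU must fit in a
+ * single Rx buffer unless scattered Rx has already been enabled.
+ */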
+static int
+enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ /* check that mtu is within the allowed range */
+ if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
+ return -EINVAL;
+
+ /*
+ * Refuse mtu that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (dev->data->min_rx_buf_size &&
+ !dev->data->scattered_rx && frame_size >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
+ return -EINVAL;
+ }
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ /* Program the new maximum frame size */
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
+ ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
+
+ return 0;
+}
+
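+/* Apply the requested Rx offloads: jumbo frame size, CRC keeping and
+ * L3/L4 checksum handling.
+ */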
+static int
+enetc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint32_t checksum = L3_CKSUM | L4_CKSUM;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len;
+
+ max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(max_len));
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
+ ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
+ 2 * ENETC_MAC_MAXFRM_SIZE);
+ dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ int config;
+
+ config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
+ config |= ENETC_PM0_CRC;
+ enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ checksum &= ~L3_CKSUM;
+
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+ checksum &= ~L4_CKSUM;
+
+ enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
+
+ return 0;
+}
+
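+/* The per-queue start/stop helpers below toggle the enable bit of the
+ * corresponding Rx/Tx BD ring mode register and track the queue state.
+ */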
+static int
+enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data | ENETC_RBMR_EN;
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
+static int
+enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data & (~ENETC_RBMR_EN);
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+static int
+enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data | ENETC_TBMR_EN;
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
+static int
+enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data & (~ENETC_TBMR_EN);
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_enetc_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+/* Ethdev operations supported by this driver */
+static const struct eth_dev_ops enetc_ops = {
+ .dev_configure = enetc_dev_configure,
+ .dev_start = enetc_dev_start,
+ .dev_stop = enetc_dev_stop,
+ .dev_close = enetc_dev_close,
+ .link_update = enetc_link_update,
+ .stats_get = enetc_stats_get,
+ .stats_reset = enetc_stats_reset,
+ .promiscuous_enable = enetc_promiscuous_enable,
+ .promiscuous_disable = enetc_promiscuous_disable,
+ .allmulticast_enable = enetc_allmulticast_enable,
+ .allmulticast_disable = enetc_allmulticast_disable,
+ .dev_infos_get = enetc_dev_infos_get,
+ .mtu_set = enetc_mtu_set,
+ .rx_queue_setup = enetc_rx_queue_setup,
+ .rx_queue_start = enetc_rx_queue_start,
+ .rx_queue_stop = enetc_rx_queue_stop,
+ .rx_queue_release = enetc_rx_queue_release,
+ .tx_queue_setup = enetc_tx_queue_setup,
+ .tx_queue_start = enetc_tx_queue_start,
+ .tx_queue_stop = enetc_tx_queue_stop,
+ .tx_queue_release = enetc_tx_queue_release,
+ .dev_supported_ptypes_get = enetc_supported_ptypes_get,
+};
+
+/**
+ * Initialisation of the enetc device
+ *
+ * @param eth_dev
+ * - Pointer to the structure rte_eth_dev
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static int
+enetc_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int error = 0;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ eth_dev->dev_ops = &enetc_ops;
+ eth_dev->rx_pkt_burst = &enetc_recv_pkts;
+ eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
+
+ /* Retrieve and store the HW base address of the device */
+ hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
+ hw->device_id = pci_dev->id.device_id;
+
+ error = enetc_hardware_init(hw);
+ if (error != 0) {
+ ENETC_PMD_ERR("Hardware initialization failed");
+ return -1;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
+ RTE_ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ RTE_ETHER_ADDR_LEN * 1);
+ return -ENOMEM;
+ }
+
+ /* Copy the permanent MAC address */
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ /* Set MTU */
+ enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
+ eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
+
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ dpaax_iova_table_populate();
+
+ ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+ return 0;
+}
+
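+/* Undo enetc_dev_init(); only the dpaax IOVA table populated in physical
+ * addressing mode needs to be released here.
+ */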
+static int
+enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ dpaax_iova_table_depopulate();
+
+ return 0;
+}
+