/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

/* Function prototypes */
static int enetc_dev_configure(struct rte_eth_dev *dev);
static int enetc_dev_start(struct rte_eth_dev *dev);
static void enetc_dev_stop(struct rte_eth_dev *dev);
static void enetc_dev_close(struct rte_eth_dev *dev);
static void enetc_dev_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int enetc_hardware_init(struct enetc_eth_hw *hw);
static int enetc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);
static void enetc_rx_queue_release(void *rxq);
static int enetc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);
static void enetc_tx_queue_release(void *txq);
static const uint32_t *enetc_supported_ptypes_get(struct rte_eth_dev *dev);
static int enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static void enetc_stats_reset(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.dev_infos_get        = enetc_dev_infos_get,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/*
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieve and store the HW base address of the device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

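/*
 * Bring the port up: enable MAC Tx/Rx and the port itself, then program
 * the interface mode (auto-speed for RGMII; XGMII when EPFBLPR reports
 * an XGMII port).
 */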
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
			     ENETC_PM0_CMD_CFG));
	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
			val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
			val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(&hw->hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(&hw->hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(&hw->hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(&hw->hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(&hw->hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
		enetc_port_wr(&hw->hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

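/* Mirror of enetc_dev_start(): disable the port, then MAC Tx and Rx */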
static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Disable port */
	val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
			val & (~ENETC_PMR_EN));

	/* Disable MAC Tx/Rx */
	val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
			     ENETC_PM0_CMD_CFG));
	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
			val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

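/*
 * Stop the port and release all Rx/Tx queues; ring memory is freed by
 * the per-queue release callbacks.
 */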
static void
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

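/* Packet types the Rx path can report in mbuf->packet_type */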
static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
				ENETC_PM0_STATUS));

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

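/*
 * One-time hardware setup: derive the port and global register windows
 * from BAR0, enable the station interface, configure SIs 0-2 to accept
 * unicast/multicast/VLAN-multicast traffic, and program the broadcast
 * filter entry.
 */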
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	uint32_t psipmr = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* Enabling Station Interface */
	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.reg, ENETC_SIMR),
			ENETC_SIMR_EN);

	/* Setting to accept broadcast packets for each interface */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0) |
		  ENETC_PSIPMR_SET_VLAN_MP(0);
	psipmr |= ENETC_PSIPMR_SET_UP(1) | ENETC_PSIPMR_SET_MP(1) |
		  ENETC_PSIPMR_SET_VLAN_MP(1);
	psipmr |= ENETC_PSIPMR_SET_UP(2) | ENETC_PSIPMR_SET_MP(2) |
		  ENETC_PSIPMR_SET_VLAN_MP(2);

	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMR),
			psipmr);

	/* Enabling broadcast address */
	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR0(0)),
			0xFFFFFFFF);
	ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR1(0)),
			0xFFFF << 16);

	return 0;
}

static void
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = 1500;
}

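/*
 * Allocate the software bookkeeping array and the hardware descriptor
 * ring of a Tx queue; both are cache-line aligned.
 */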
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

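/*
 * Point the hardware at a Tx ring: program the ring base (IOVA) and
 * length, enable the ring, zero its index registers, and cache their
 * addresses for use on the transmit fast path.
 */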
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	uint32_t tbmr;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	tbmr = ENETC_TBMR_EN;
	/* enable ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_alloc_tx_resources(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc)
{
	int err;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);

	return err;
}

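/*
 * Disable the Tx ring in hardware, free any mbufs the driver still
 * holds, then release the descriptor and ring memory.
 */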
static void
enetc_tx_queue_release(void *txq)
{
	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* Clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

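/*
 * Program an Rx ring: base (IOVA) and length, attach the mempool,
 * pre-fill the ring with mbufs, tell the hardware how large each Rx
 * buffer is, and enable the ring.
 */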
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	/* enable ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_alloc_rx_resources(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc,
			 struct rte_mempool *mb_pool)
{
	int err;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf __rte_unused,
		     struct rte_mempool *mb_pool)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -1;

	err = enetc_alloc_rx_resources(dev, rx_queue_id,
				       nb_rx_desc,
				       mb_pool);

	return err;
}

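/*
 * Disable the Rx ring and free the mbufs sitting between the clean and
 * use indices before releasing the descriptor and ring memory.
 */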
static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good; to count only good packets
	 * use the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

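/* Writing ENETC_CLEAR_STATS to the statistics config register clears
 * all port MAC counters in hardware.
 */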
static void
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

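/* Register the driver's log type at constructor time; the default log
 * level is NOTICE.
 */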
RTE_INIT(enetc_pmd_init_log)
{
	enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
	if (enetc_logtype_pmd >= 0)
		rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}