/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;
/* Functions Prototypes */
static int enetc_dev_configure(struct rte_eth_dev *dev);
static int enetc_dev_start(struct rte_eth_dev *dev);
static void enetc_dev_stop(struct rte_eth_dev *dev);
static void enetc_dev_close(struct rte_eth_dev *dev);
static void enetc_dev_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int enetc_hardware_init(struct enetc_eth_hw *hw);
static int enetc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                                uint16_t nb_rx_desc, unsigned int socket_id,
                                const struct rte_eth_rxconf *rx_conf,
                                struct rte_mempool *mb_pool);
static void enetc_rx_queue_release(void *rxq);
static int enetc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                                uint16_t nb_tx_desc, unsigned int socket_id,
                                const struct rte_eth_txconf *tx_conf);
static void enetc_tx_queue_release(void *txq);
static const uint32_t *enetc_supported_ptypes_get(struct rte_eth_dev *dev);
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};
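
/*
 * ENETC_DEV_ID and ENETC_DEV_ID_VF are the PF and VF device IDs; the
 * zeroed entry is the sentinel that terminates the table for the PCI
 * bus scan.
 */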
/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .dev_infos_get        = enetc_dev_infos_get,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_release     = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};
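
/*
 * These callbacks are reached through the generic ethdev layer: e.g.
 * rte_eth_dev_configure() dispatches to enetc_dev_configure() and
 * rte_eth_rx_queue_setup() to enetc_rx_queue_setup().
 */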
/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return -1;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}
static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();
        rte_free(eth_dev->data->mac_addrs);

        return 0;
}
static int
enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Enable MAC transmit and receive */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                             ENETC_PM0_CMD_CFG));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
                        val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
                        val | ENETC_PMR_EN);

        return 0;
}
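
/*
 * Reverse of enetc_dev_start(): clear the port enable bit first, then the
 * MAC Tx/Rx enable bits, reusing the read-modify-write pattern so that
 * unrelated bits in ENETC_PMR and ENETC_PM0_CMD_CFG are preserved.
 */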
static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Disable port */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
                        val & (~ENETC_PMR_EN));

        /* Disable MAC transmit and receive */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                             ENETC_PM0_CMD_CFG));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
                        val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}
static void
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
        enetc_dev_stop(dev);

        /* Release and forget all queues */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}
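
/*
 * Packet types the receive path can report in mbuf packet_type; exposed
 * to applications through rte_eth_dev_get_supported_ptypes().
 */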
static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN,
        };

        return ptypes;
}
/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                                ENETC_PM0_STATUS));

        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}
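
/*
 * BAR0 exposes a single register space; the port and global register
 * blocks live at fixed offsets (ENETC_PORT_BASE, ENETC_GLOBAL_BASE)
 * within it and are resolved once here and cached in the hw handle.
 */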
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        uint32_t psipmr = 0;

        PMD_INIT_FUNC_TRACE();
        /* Calculating and storing the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* Enabling Station Interface */
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.reg, ENETC_SIMR),
                        ENETC_SIMR_EN);

        /* Setting to accept broadcast packets for each interface */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0) |
                  ENETC_PSIPMR_SET_VLAN_MP(0);
        psipmr |= ENETC_PSIPMR_SET_UP(1) | ENETC_PSIPMR_SET_MP(1) |
                  ENETC_PSIPMR_SET_VLAN_MP(1);
        psipmr |= ENETC_PSIPMR_SET_UP(2) | ENETC_PSIPMR_SET_MP(2) |
                  ENETC_PSIPMR_SET_VLAN_MP(2);

        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMR),
                        psipmr);

        /* Enabling broadcast address */
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR0(0)),
                        0xFFFFFFFF);
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR1(0)),
                        0xFFFF << 16);

        return 0;
}
static void
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = 1500;
}
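
/*
 * Every BD ring is backed by two parallel arrays: q_swbd, the software
 * bookkeeping entries (one mbuf pointer per descriptor), and bd_base,
 * the hardware descriptors themselves. Both are sized by the descriptor
 * count and released together in enetc_free_bdr().
 */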
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}
static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}
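
/*
 * Point the hardware at a Tx ring. The driver sets RTE_PCI_DRV_IOVA_AS_VA,
 * so the virtual address of bd_base is also its IOVA and can be programmed
 * into the TBBAR0/TBBAR1 base address registers directly.
 */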
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        uintptr_t base_addr;
        uint32_t tbmr;

        base_addr = (uintptr_t)tx_ring->bd_base;
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)base_addr));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)base_addr));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        tbmr = ENETC_TBMR_EN;
        /* Enable the ring */
        enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}
static int
enetc_alloc_tx_resources(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
                         uint16_t nb_desc)
{
        int err;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf __rte_unused)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);

        return err;
}
static void
enetc_tx_queue_release(void *txq)
{
        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Free any mbufs still held by the software ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}
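
/*
 * The Rx helpers mirror the Tx ones; the Rx ring additionally tracks
 * next_to_alloc for the buffer refill logic in the receive path.
 */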
static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uintptr_t base_addr;
        uint16_t buf_size;

        base_addr = (uintptr_t)rx_ring->bd_base;
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)base_addr));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)base_addr));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        /* Enable the ring */
        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        /* Buffer size is the mbuf data room left after the headroom */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                              RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
}
static int
enetc_alloc_rx_resources(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc,
                         struct rte_mempool *mb_pool)
{
        int err;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}
static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf __rte_unused,
                     struct rte_mempool *mb_pool)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_rx_resources(dev, rx_queue_id,
                                       nb_rx_desc, mb_pool);

        return err;
}
static void
enetc_rx_queue_release(void *rxq)
{
        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Free the outstanding mbufs between clean and use */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}
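
/*
 * rte_eth_dev_pci_generic_probe() allocates an ethdev whose private data
 * area is sizeof(struct enetc_eth_adapter) and then runs enetc_dev_init()
 * on it; remove symmetrically runs enetc_dev_uninit().
 */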
static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}
static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}
static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
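
/*
 * Illustrative usage (the PCI address below is a placeholder, not a real
 * binding for any particular board):
 *
 *   dpdk-devbind.py --bind=vfio-pci 0000:00:00.0
 *   testpmd -c 0x3 -n 1 -- -i
 *
 * The KMOD_DEP registration above records that the device must be bound
 * to vfio-pci before this PMD can drive it.
 */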
RTE_INIT(enetc_pmd_init_log)
{
        enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
        if (enetc_logtype_pmd >= 0)
                rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}
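
/*
 * Logging defaults to NOTICE; it can be raised at run time with the EAL
 * option --log-level, e.g. --log-level=pmd.net.enetc:8 for debug output
 * (an illustrative invocation; the exact syntax depends on the DPDK
 * version in use).
 */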