/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Enable MAC transmit and receive paths */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}
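
/* Stop mirrors enetc_dev_start: the port enable bit (ENETC_PMR_EN) and the
 * MAC Tx/Rx enables (ENETC_PM0_TX_EN/ENETC_PM0_RX_EN) are cleared in the
 * same registers that start set them.
 */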

static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	/* Disable MAC transmit and receive paths */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));

	return 0;
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}
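
/* One-time SI/port bring-up. Note on the MAC handling below: the primary MAC
 * is split across two registers, PSIPMAR0 (low four bytes) and PSIPMAR1
 * (upper two bytes). When both read back as zero, a random locally
 * administered address is generated instead: the first byte has the
 * multicast bit cleared and the locally-assigned bit set, per IEEE 802
 * conventions.
 */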

static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* WA for Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				 "set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}
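
/* Each ring keeps two parallel arrays: q_swbd, a software shadow holding the
 * mbuf pointer for every descriptor, and bd_base, the hardware-visible
 * buffer descriptor array itself. Both are sized by the descriptor count and
 * allocated with ENETC_BD_RING_ALIGN alignment.
 */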

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCISR));
}
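
/* Queue setup follows the same pattern for Tx and Rx: validate the requested
 * descriptor count, allocate the ring, program the BDR registers, then either
 * enable the ring immediately or leave it stopped when deferred start is
 * requested in the queue configuration.
 */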

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		err = -ENOMEM;
		return -1;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

static void
enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}
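
/* The Rx buffer size programmed into ENETC_RBBSR is derived from the
 * mempool: the data room size minus RTE_PKTMBUF_HEADROOM, i.e. the largest
 * payload a single mbuf from this pool can hold. The ring is pre-filled with
 * as many free buffers as there are unused descriptors before it is armed.
 */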

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -1;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

static void
enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}
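
/* Basic statistics come straight from the port MAC counter registers; no
 * software accumulation is done, so a stats read is just a handful of
 * register reads.
 */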

static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good, if we want to get counters of
	 * only good received packets then use ENETC_PM0_RFRM,
	 * ENETC_PM0_TFRM registers.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + Truncated packets, use ENETC_PM0_RDRNTP for without
	 * truncated packets
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}
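
/* Close stops the port, releases every Rx/Tx queue, and, when running with
 * physical IOVA, tears down the dpaax IOVA translation table populated at
 * init time. Secondary processes return early since they do not own the
 * queues.
 */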

static int
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return ret;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable promiscuous mode */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Setting to disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable allmulticast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	/* Setting to disable all multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}
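
/* The on-wire frame size checked against the hardware limit is the MTU plus
 * the Ethernet header and CRC, e.g. a 1500-byte MTU corresponds to a
 * 1518-byte frame. MTUs that would need scatter-gather Rx are rejected when
 * that feature is not already enabled.
 */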

static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* check that mtu is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	if (frame_size > ENETC_ETH_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* setting the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}
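
/* The checksum word written to ENETC_PAR_PORT_CFG below starts with both
 * L3_CKSUM and L4_CKSUM set; each bit is cleared when the matching Rx
 * checksum offload is requested, which appears to hand the corresponding
 * validation over to the hardware parser.
 */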

static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}
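
/* Per-queue start/stop toggles only the ring enable bit (ENETC_RBMR_EN /
 * ENETC_TBMR_EN) and mirrors the result in the ethdev queue state, so a
 * stopped queue keeps its descriptors and buffers.
 */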

static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		error = -ENOMEM;
		return -1;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER_DEFAULT(enetc_logtype_pmd, NOTICE);