/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Enable MAC transmit and receive */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}
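
/*
 * Note: the read-modify-write pattern above is used for every enable /
 * disable toggle in this file:
 *
 *   val = enetc_port_rd(enetc_hw, REG);        read the current value
 *   enetc_port_wr(enetc_hw, REG, val | EN);    set only the enable bit
 *
 * so that the remaining bits of the register are left untouched.
 */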

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	/* Disable MAC transmit and receive */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
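
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * the link state assembled above is what an application sees through the
 * generic ethdev API, e.g.:
 *
 *   struct rte_eth_link link;
 *
 *   rte_eth_link_get_nowait(port_id, &link);
 *   if (link.link_status == ETH_LINK_UP)
 *       printf("port %u: %u Mbps\n", port_id, link.link_speed);
 */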

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				 "set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}
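
/*
 * Worked example for the bit twiddling above, assuming a little-endian
 * CPU: if rte_rand() returns 0x12345678, the first MAC byte is 0x78;
 * clearing bit 0 (&= 0xfe) keeps the address unicast and setting bit 1
 * (|= 0x02) marks it locally administered, giving 0x7a and a MAC of the
 * form 7a:56:34:12:xx:xx.
 */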

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
}

static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}
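
/*
 * Example of the base-address split above: the BD ring registers are
 * 32 bits wide, so an IOVA of 0x0000000812345000 is programmed as
 * TBBAR0 = 0x12345000 (lower_32_bits) and TBBAR1 = 0x00000008
 * (upper_32_bits).
 */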

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		err = -ENOMEM;
		return -1;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}
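
/*
 * Deferred-start usage sketch (hypothetical application code): a queue
 * set up with tx_deferred_start = 1 is left disabled here and only
 * enabled once the application starts it explicitly:
 *
 *   struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
 *
 *   rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 *   ...
 *   rte_eth_dev_tx_queue_start(port_id, 0);   calls enetc_tx_queue_start()
 */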

static void
enetc_tx_queue_release(void *txq)
{
	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw = &eth_hw->hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* Clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}
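
/*
 * Worked example for the RBBSR value above: a mempool created with the
 * default RTE_MBUF_DEFAULT_BUF_SIZE has a data room of 2176 bytes, so
 *
 *   buf_size = 2176 - RTE_PKTMBUF_HEADROOM (128) = 2048
 *
 * is the largest frame the hardware is allowed to DMA into one buffer.
 */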

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
		ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -1;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw = &eth_hw->hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

static
int enetc_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good. To count only good
	 * received/transmitted packets, use the ENETC_PM0_RFRM and
	 * ENETC_PM0_TFRM registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}
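
/*
 * Usage sketch (hypothetical application code): these MAC counters are
 * reported through the generic stats API:
 *
 *   struct rte_eth_stats st;
 *
 *   rte_eth_stats_get(port_id, &st);
 *   printf("rx %"PRIu64" pkts, %"PRIu64" missed\n",
 *          st.ipackets, st.imissed);
 */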

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

static void
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable promiscuous mode */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}
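
/*
 * ENETC_PSIPMR_SET_UP(n) and ENETC_PSIPMR_SET_MP(n) appear to select
 * unicast and multicast promiscuous mode for station interface n (an
 * assumption based on their usage here); SI 0 is the interface owned by
 * this PMD, hence the argument 0 throughout.
 */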

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Setting to disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable all multicast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	/* Setting to disable all multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
		 ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* Check that the MTU is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse an MTU that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Setting the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}
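
/*
 * Worked example for frame_size above: the standard MTU of 1500 gives
 *
 *   frame_size = 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4)
 *              = 1518 = RTE_ETHER_MAX_LEN,
 *
 * so the jumbo-frame offload flag is left cleared in that case.
 */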

static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}
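
/*
 * The checksum word written above starts with both L3_CKSUM and L4_CKSUM
 * set and each bit is cleared when the application requests the matching
 * Rx checksum offload; e.g. enabling only DEV_RX_OFFLOAD_IPV4_CKSUM
 * leaves checksum == L4_CKSUM.
 */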

static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure = enetc_dev_configure,
	.dev_start = enetc_dev_start,
	.dev_stop = enetc_dev_stop,
	.dev_close = enetc_dev_close,
	.link_update = enetc_link_update,
	.stats_get = enetc_stats_get,
	.stats_reset = enetc_stats_reset,
	.promiscuous_enable = enetc_promiscuous_enable,
	.promiscuous_disable = enetc_promiscuous_disable,
	.allmulticast_enable = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get = enetc_dev_infos_get,
	.mtu_set = enetc_mtu_set,
	.rx_queue_setup = enetc_rx_queue_setup,
	.rx_queue_start = enetc_rx_queue_start,
	.rx_queue_stop = enetc_rx_queue_stop,
	.rx_queue_release = enetc_rx_queue_release,
	.tx_queue_setup = enetc_tx_queue_setup,
	.tx_queue_start = enetc_tx_queue_start,
	.tx_queue_stop = enetc_tx_queue_stop,
	.tx_queue_release = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		error = -ENOMEM;
		return -1;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
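
/*
 * The device must be bound to vfio-pci before this PMD can probe it; a
 * minimal sketch, assuming the devbind helper shipped with DPDK:
 *
 *   dpdk-devbind.py --bind=vfio-pci <PCI address of the ENETC function>
 */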

RTE_INIT(enetc_pmd_init_log)
{
	enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
	if (enetc_logtype_pmd >= 0)
		rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}