/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

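/* Start the port: enable MAC Tx/Rx and the port itself, let an RGMII
 * interface auto-negotiate its speed, and switch to XGMII mode when the
 * global block-layer protocol register reports XGMII.
 */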
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

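/* Stop traffic: mark the device stopped, then disable the port and the
 * MAC Tx/Rx paths (mirror image of enetc_dev_start()).
 */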
static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));

	return 0;
}

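/* Report the packet types this PMD can parse into mbuf packet-type flags. */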
static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

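/* One-time hardware setup: map the port/global register blocks, apply the
 * Rx FIFO lock-up erratum workaround, set coherent DMA transactions, enable
 * the station interface and read back (or randomly generate) the MAC address.
 */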
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* WA for Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				 "set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

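/* Advertise fixed device capabilities: BD ring limits, queue counts,
 * maximum frame length and the supported Rx checksum/CRC offloads.
 */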
static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC);

	return 0;
}

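/* Allocate the software bookkeeping array and the hardware BD ring for a
 * Tx queue; both are sized by nb_desc and aligned for the hardware.
 */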
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

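/* Program a Tx BD ring into the hardware: base address (IOVA) and length,
 * zero the consumer index registers, and cache their addresses for use on
 * the fast path.
 */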
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

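/* ethdev tx_queue_setup hook: allocate and program one Tx BD ring and,
 * unless deferred start was requested, enable it immediately.
 */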
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);
	return err;
}

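/* Release a Tx queue: disable the ring, free any mbufs still owned by the
 * driver, then free the BD ring and the queue structure.
 */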
static void
enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

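/* Program an Rx BD ring: base address (IOVA) and length, attach the
 * mempool, pre-fill the ring with buffers and set the buffer size the
 * hardware may DMA into.
 */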
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
		ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -1;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);
	return err;
}

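/* Release an Rx queue: disable the ring, free the not-yet-consumed mbufs
 * between next_to_clean and next_to_use, then free the ring memory.
 */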
static void
enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good. To count only good received
	 * packets, use the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers
	 * instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

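/* Clear the MAC statistics counters via the statistics config register. */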
static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

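/* Close the device: stop traffic, release every Rx/Tx queue and, in
 * physical-address IOVA mode, tear down the dpaax IOVA translation table.
 */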
static int
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return ret;
}

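/* Unicast/multicast promiscuous handling is done through the port SI
 * promiscuous mode register (PSIPMR), always for station interface 0.
 */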
static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable promiscuous mode */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	/* Setting to disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable allmulticast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	/* Setting to disable all multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
		 ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

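/* ethdev mtu_set hook: reject frames that would no longer fit a single Rx
 * buffer while scatter Rx is off, then program the new maximum frame size
 * into the MAC.
 */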
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	/* Setting the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

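/* ethdev dev_configure hook: program the maximum frame length, optionally
 * keep the Ethernet CRC, and adjust the parser L3/L4 checksum configuration
 * according to the requested Rx checksum offloads.
 */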
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;
	uint32_t max_len;

	PMD_INIT_FUNC_TRACE();

	max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
		  RTE_ETHER_CRC_LEN;
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		uint32_t config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

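/* Per-queue start/stop hooks: toggle the ring-enable bit in the Rx/Tx BD
 * ring mode registers and keep the ethdev queue state in sync.
 */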
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
		ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			     RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER_DEFAULT(enetc_logtype_pmd, NOTICE);