/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;
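
/*
 * Bring the port up: enable MAC TX/RX in PM0_CMD_CFG, set the port enable
 * bit in PMR, then program the interface mode (RGMII auto-speed or XGMII,
 * as reported by the interface-mode and capability registers).
 */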
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}

	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	/* Disable MAC TX/RX */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_INFO("%s%s\n", name, buf);
}
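
/*
 * One-time hardware bring-up: map the port and global register blocks,
 * enable the station interface, and read the primary MAC address from
 * PSIPMAR0/1. If no MAC has been provisioned for this SI, fall back to
 * a random locally administered address and write it back.
 */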
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	/* Read the 6-byte MAC address as a 4-byte word plus a 2-byte word */
	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}
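
/*
 * Program one TX BD ring: write the ring's IOVA (split across TBBAR0/1)
 * and its length, zero the producer/consumer indices, and cache the
 * addresses of the index registers so the datapath can ring the doorbell
 * without recomputing register offsets.
 */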
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -EINVAL;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

static void
enetc_tx_queue_release(void *txq)
{
	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* Clean the ring: free any mbufs still held in software BDs */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}
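
/*
 * Program one RX BD ring: write the ring's IOVA and length, remember the
 * mempool, pre-fill every free BD with an mbuf, and tell the hardware the
 * usable buffer size (mempool data room minus the mbuf headroom).
 */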
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -EINVAL;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring: free the mbufs between next_to_clean and
	 * next_to_use
	 */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good; to count only good packets,
	 * use the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

static void
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable promiscuous mode for SI0 (unicast and multicast) */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	/* Disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	/* Keep accepting all multicast if all-multicast mode is still on */
	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable all-multicast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr;

	/* Promiscuous mode implies all-multicast; leave the bit set */
	if (dev->data->promiscuous == 1)
		return 0;

	/* Disable all-multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* Check that the MTU is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse an MTU that requires scattered-packet support when the
	 * feature has not been enabled beforehand.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the MTU: the RX limit tracks the new frame size while TX
	 * stays at the MAC maximum
	 */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}
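
/*
 * Apply the port configuration: program the maximum frame length when
 * jumbo frames are requested, tell the MAC to keep the CRC when the
 * KEEP_CRC offload is set, and derive the L3/L4 checksum configuration
 * written to ENETC_PAR_PORT_CFG from the requested RX offloads.
 */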
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	/* Clear a checksum bit when the corresponding RX offload is
	 * requested; leave it set otherwise.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}
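
/*
 * The four queue start/stop helpers below toggle the ring-enable bit in
 * the RX/TX BD ring mode register (RBMR/TBMR) and keep the queue state
 * recorded in dev->data in sync with the hardware.
 */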
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure = enetc_dev_configure,
	.dev_start = enetc_dev_start,
	.dev_stop = enetc_dev_stop,
	.dev_close = enetc_dev_close,
	.link_update = enetc_link_update,
	.stats_get = enetc_stats_get,
	.stats_reset = enetc_stats_reset,
	.promiscuous_enable = enetc_promiscuous_enable,
	.promiscuous_disable = enetc_promiscuous_disable,
	.allmulticast_enable = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get = enetc_dev_infos_get,
	.mtu_set = enetc_mtu_set,
	.rx_queue_setup = enetc_rx_queue_setup,
	.rx_queue_start = enetc_rx_queue_start,
	.rx_queue_stop = enetc_rx_queue_stop,
	.rx_queue_release = enetc_rx_queue_release,
	.tx_queue_setup = enetc_tx_queue_setup,
	.tx_queue_start = enetc_tx_queue_start,
	.tx_queue_stop = enetc_tx_queue_stop,
	.tx_queue_release = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
	enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
	if (enetc_logtype_pmd >= 0)
		rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}