1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2019 NXP
#include <stdbool.h>

#include <rte_ethdev_pci.h>
#include <rte_random.h>

#include "enetc_logs.h"
#include "enetc.h"
12 int enetc_logtype_pmd;
15 enetc_dev_start(struct rte_eth_dev *dev)
17 struct enetc_eth_hw *hw =
18 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
19 struct enetc_hw *enetc_hw = &hw->hw;
22 PMD_INIT_FUNC_TRACE();
23 val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
24 enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
25 val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
28 val = enetc_port_rd(enetc_hw, ENETC_PMR);
29 enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);
31 /* set auto-speed for RGMII */
32 if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
33 enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
34 ENETC_PM0_IFM_RGAUTO);
35 enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
36 ENETC_PM0_IFM_RGAUTO);
38 if (enetc_global_rd(enetc_hw,
39 ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
40 enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
42 enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
50 enetc_dev_stop(struct rte_eth_dev *dev)
52 struct enetc_eth_hw *hw =
53 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
54 struct enetc_hw *enetc_hw = &hw->hw;
57 PMD_INIT_FUNC_TRACE();
59 val = enetc_port_rd(enetc_hw, ENETC_PMR);
60 enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));
62 val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
63 enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
64 val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
67 static const uint32_t *
68 enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
70 static const uint32_t ptypes[] = {
84 /* return 0 means link status changed, -1 means not changed */
86 enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
88 struct enetc_eth_hw *hw =
89 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
90 struct enetc_hw *enetc_hw = &hw->hw;
91 struct rte_eth_link link;
94 PMD_INIT_FUNC_TRACE();
96 memset(&link, 0, sizeof(link));
98 status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);
100 if (status & ENETC_LINK_MODE)
101 link.link_duplex = ETH_LINK_FULL_DUPLEX;
103 link.link_duplex = ETH_LINK_HALF_DUPLEX;
105 if (status & ENETC_LINK_STATUS)
106 link.link_status = ETH_LINK_UP;
108 link.link_status = ETH_LINK_DOWN;
110 switch (status & ENETC_LINK_SPEED_MASK) {
111 case ENETC_LINK_SPEED_1G:
112 link.link_speed = ETH_SPEED_NUM_1G;
115 case ENETC_LINK_SPEED_100M:
116 link.link_speed = ETH_SPEED_NUM_100M;
120 case ENETC_LINK_SPEED_10M:
121 link.link_speed = ETH_SPEED_NUM_10M;
124 return rte_eth_linkstatus_set(dev, &link);
128 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
130 char buf[RTE_ETHER_ADDR_FMT_SIZE];
132 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
133 ENETC_PMD_NOTICE("%s%s\n", name, buf);
137 enetc_hardware_init(struct enetc_eth_hw *hw)
139 struct enetc_hw *enetc_hw = &hw->hw;
140 uint32_t *mac = (uint32_t *)hw->mac.addr;
141 uint32_t high_mac = 0;
142 uint16_t low_mac = 0;
144 PMD_INIT_FUNC_TRACE();
145 /* Calculating and storing the base HW addresses */
146 hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
147 hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);
149 /* Enabling Station Interface */
150 enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);
152 *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
153 high_mac = (uint32_t)*mac;
155 *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
156 low_mac = (uint16_t)*mac;
158 if ((high_mac | low_mac) == 0) {
161 ENETC_PMD_NOTICE("MAC is not available for this SI, "
163 mac = (uint32_t *)hw->mac.addr;
164 *mac = (uint32_t)rte_rand();
165 first_byte = (char *)mac;
166 *first_byte &= 0xfe; /* clear multicast bit */
167 *first_byte |= 0x02; /* set local assignment bit (IEEE802) */
169 enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
171 *mac = (uint16_t)rte_rand();
172 enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
173 print_ethaddr("New address: ",
174 (const struct rte_ether_addr *)hw->mac.addr);
181 enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
182 struct rte_eth_dev_info *dev_info)
184 PMD_INIT_FUNC_TRACE();
185 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
186 .nb_max = MAX_BD_COUNT,
187 .nb_min = MIN_BD_COUNT,
188 .nb_align = BD_ALIGN,
190 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
191 .nb_max = MAX_BD_COUNT,
192 .nb_min = MIN_BD_COUNT,
193 .nb_align = BD_ALIGN,
195 dev_info->max_rx_queues = MAX_RX_RINGS;
196 dev_info->max_tx_queues = MAX_TX_RINGS;
197 dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
198 dev_info->rx_offload_capa =
199 (DEV_RX_OFFLOAD_IPV4_CKSUM |
200 DEV_RX_OFFLOAD_UDP_CKSUM |
201 DEV_RX_OFFLOAD_TCP_CKSUM |
202 DEV_RX_OFFLOAD_KEEP_CRC |
203 DEV_RX_OFFLOAD_JUMBO_FRAME);
209 enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
213 size = nb_desc * sizeof(struct enetc_swbd);
214 txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
215 if (txr->q_swbd == NULL)
218 size = nb_desc * sizeof(struct enetc_tx_bd);
219 txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
220 if (txr->bd_base == NULL) {
221 rte_free(txr->q_swbd);
226 txr->bd_count = nb_desc;
227 txr->next_to_clean = 0;
228 txr->next_to_use = 0;
234 enetc_free_bdr(struct enetc_bdr *rxr)
236 rte_free(rxr->q_swbd);
237 rte_free(rxr->bd_base);
243 enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
245 int idx = tx_ring->index;
246 phys_addr_t bd_address;
248 bd_address = (phys_addr_t)
249 rte_mem_virt2iova((const void *)tx_ring->bd_base);
250 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
251 lower_32_bits((uint64_t)bd_address));
252 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
253 upper_32_bits((uint64_t)bd_address));
254 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
255 ENETC_RTBLENR_LEN(tx_ring->bd_count));
257 enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
258 enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
259 tx_ring->tcir = (void *)((size_t)hw->reg +
260 ENETC_BDR(TX, idx, ENETC_TBCIR));
261 tx_ring->tcisr = (void *)((size_t)hw->reg +
262 ENETC_BDR(TX, idx, ENETC_TBCISR));
266 enetc_tx_queue_setup(struct rte_eth_dev *dev,
269 unsigned int socket_id __rte_unused,
270 const struct rte_eth_txconf *tx_conf)
273 struct enetc_bdr *tx_ring;
274 struct rte_eth_dev_data *data = dev->data;
275 struct enetc_eth_adapter *priv =
276 ENETC_DEV_PRIVATE(data->dev_private);
278 PMD_INIT_FUNC_TRACE();
279 if (nb_desc > MAX_BD_COUNT)
282 tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
283 if (tx_ring == NULL) {
284 ENETC_PMD_ERR("Failed to allocate TX ring memory");
289 err = enetc_alloc_txbdr(tx_ring, nb_desc);
293 tx_ring->index = queue_idx;
295 enetc_setup_txbdr(&priv->hw.hw, tx_ring);
296 data->tx_queues[queue_idx] = tx_ring;
298 if (!tx_conf->tx_deferred_start) {
300 enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
301 ENETC_TBMR, ENETC_TBMR_EN);
302 dev->data->tx_queue_state[tx_ring->index] =
303 RTE_ETH_QUEUE_STATE_STARTED;
305 dev->data->tx_queue_state[tx_ring->index] =
306 RTE_ETH_QUEUE_STATE_STOPPED;
317 enetc_tx_queue_release(void *txq)
322 struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
323 struct enetc_eth_hw *eth_hw =
324 ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
326 struct enetc_swbd *tx_swbd;
330 /* Disable the ring */
332 val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
333 val &= (~ENETC_TBMR_EN);
334 enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);
337 i = tx_ring->next_to_clean;
338 tx_swbd = &tx_ring->q_swbd[i];
339 while (tx_swbd->buffer_addr != NULL) {
340 rte_pktmbuf_free(tx_swbd->buffer_addr);
341 tx_swbd->buffer_addr = NULL;
344 if (unlikely(i == tx_ring->bd_count)) {
346 tx_swbd = &tx_ring->q_swbd[i];
350 enetc_free_bdr(tx_ring);
355 enetc_alloc_rxbdr(struct enetc_bdr *rxr,
360 size = nb_rx_desc * sizeof(struct enetc_swbd);
361 rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
362 if (rxr->q_swbd == NULL)
365 size = nb_rx_desc * sizeof(union enetc_rx_bd);
366 rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
367 if (rxr->bd_base == NULL) {
368 rte_free(rxr->q_swbd);
373 rxr->bd_count = nb_rx_desc;
374 rxr->next_to_clean = 0;
375 rxr->next_to_use = 0;
376 rxr->next_to_alloc = 0;
382 enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
383 struct rte_mempool *mb_pool)
385 int idx = rx_ring->index;
387 phys_addr_t bd_address;
389 bd_address = (phys_addr_t)
390 rte_mem_virt2iova((const void *)rx_ring->bd_base);
391 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
392 lower_32_bits((uint64_t)bd_address));
393 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
394 upper_32_bits((uint64_t)bd_address));
395 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
396 ENETC_RTBLENR_LEN(rx_ring->bd_count));
398 rx_ring->mb_pool = mb_pool;
399 rx_ring->rcir = (void *)((size_t)hw->reg +
400 ENETC_BDR(RX, idx, ENETC_RBCIR));
401 enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
402 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
403 RTE_PKTMBUF_HEADROOM);
404 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
405 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
409 enetc_rx_queue_setup(struct rte_eth_dev *dev,
410 uint16_t rx_queue_id,
412 unsigned int socket_id __rte_unused,
413 const struct rte_eth_rxconf *rx_conf,
414 struct rte_mempool *mb_pool)
417 struct enetc_bdr *rx_ring;
418 struct rte_eth_dev_data *data = dev->data;
419 struct enetc_eth_adapter *adapter =
420 ENETC_DEV_PRIVATE(data->dev_private);
421 uint64_t rx_offloads = data->dev_conf.rxmode.offloads;
423 PMD_INIT_FUNC_TRACE();
424 if (nb_rx_desc > MAX_BD_COUNT)
427 rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
428 if (rx_ring == NULL) {
429 ENETC_PMD_ERR("Failed to allocate RX ring memory");
434 err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
438 rx_ring->index = rx_queue_id;
440 enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
441 data->rx_queues[rx_queue_id] = rx_ring;
443 if (!rx_conf->rx_deferred_start) {
445 enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
447 dev->data->rx_queue_state[rx_ring->index] =
448 RTE_ETH_QUEUE_STATE_STARTED;
450 dev->data->rx_queue_state[rx_ring->index] =
451 RTE_ETH_QUEUE_STATE_STOPPED;
454 rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
455 RTE_ETHER_CRC_LEN : 0);
465 enetc_rx_queue_release(void *rxq)
470 struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
471 struct enetc_eth_hw *eth_hw =
472 ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
473 struct enetc_swbd *q_swbd;
478 /* Disable the ring */
480 val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
481 val &= (~ENETC_RBMR_EN);
482 enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);
485 i = rx_ring->next_to_clean;
486 q_swbd = &rx_ring->q_swbd[i];
487 while (i != rx_ring->next_to_use) {
488 rte_pktmbuf_free(q_swbd->buffer_addr);
489 q_swbd->buffer_addr = NULL;
492 if (unlikely(i == rx_ring->bd_count)) {
494 q_swbd = &rx_ring->q_swbd[i];
498 enetc_free_bdr(rx_ring);
503 int enetc_stats_get(struct rte_eth_dev *dev,
504 struct rte_eth_stats *stats)
506 struct enetc_eth_hw *hw =
507 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
508 struct enetc_hw *enetc_hw = &hw->hw;
510 /* Total received packets, bad + good, if we want to get counters of
511 * only good received packets then use ENETC_PM0_RFRM,
512 * ENETC_PM0_TFRM registers.
514 stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
515 stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
516 stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
517 stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
518 /* Dropped + Truncated packets, use ENETC_PM0_RDRNTP for without
521 stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
522 stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
523 stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);
529 enetc_stats_reset(struct rte_eth_dev *dev)
531 struct enetc_eth_hw *hw =
532 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
533 struct enetc_hw *enetc_hw = &hw->hw;
535 enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
541 enetc_dev_close(struct rte_eth_dev *dev)
545 PMD_INIT_FUNC_TRACE();
548 for (i = 0; i < dev->data->nb_rx_queues; i++) {
549 enetc_rx_queue_release(dev->data->rx_queues[i]);
550 dev->data->rx_queues[i] = NULL;
552 dev->data->nb_rx_queues = 0;
554 for (i = 0; i < dev->data->nb_tx_queues; i++) {
555 enetc_tx_queue_release(dev->data->tx_queues[i]);
556 dev->data->tx_queues[i] = NULL;
558 dev->data->nb_tx_queues = 0;
562 enetc_promiscuous_enable(struct rte_eth_dev *dev)
564 struct enetc_eth_hw *hw =
565 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
566 struct enetc_hw *enetc_hw = &hw->hw;
569 psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
571 /* Setting to enable promiscuous mode*/
572 psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
574 enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
580 enetc_promiscuous_disable(struct rte_eth_dev *dev)
582 struct enetc_eth_hw *hw =
583 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
584 struct enetc_hw *enetc_hw = &hw->hw;
587 /* Setting to disable promiscuous mode for SI0*/
588 psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
589 psipmr &= (~ENETC_PSIPMR_SET_UP(0));
591 if (dev->data->all_multicast == 0)
592 psipmr &= (~ENETC_PSIPMR_SET_MP(0));
594 enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
600 enetc_allmulticast_enable(struct rte_eth_dev *dev)
602 struct enetc_eth_hw *hw =
603 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
604 struct enetc_hw *enetc_hw = &hw->hw;
607 psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
609 /* Setting to enable allmulticast mode for SI0*/
610 psipmr |= ENETC_PSIPMR_SET_MP(0);
612 enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
618 enetc_allmulticast_disable(struct rte_eth_dev *dev)
620 struct enetc_eth_hw *hw =
621 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
622 struct enetc_hw *enetc_hw = &hw->hw;
625 if (dev->data->promiscuous == 1)
626 return 0; /* must remain in all_multicast mode */
628 /* Setting to disable all multicast mode for SI0*/
629 psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
630 ~(ENETC_PSIPMR_SET_MP(0));
632 enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
638 enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
640 struct enetc_eth_hw *hw =
641 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 struct enetc_hw *enetc_hw = &hw->hw;
643 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
645 /* check that mtu is within the allowed range */
646 if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
650 * Refuse mtu that requires the support of scattered packets
651 * when this feature has not been enabled before.
653 if (dev->data->min_rx_buf_size &&
654 !dev->data->scattered_rx && frame_size >
655 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
656 ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
660 if (frame_size > RTE_ETHER_MAX_LEN)
661 dev->data->dev_conf.rxmode.offloads &=
662 DEV_RX_OFFLOAD_JUMBO_FRAME;
664 dev->data->dev_conf.rxmode.offloads &=
665 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
667 enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
668 enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
670 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
673 enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
674 ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
680 enetc_dev_configure(struct rte_eth_dev *dev)
682 struct enetc_eth_hw *hw =
683 ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
684 struct enetc_hw *enetc_hw = &hw->hw;
685 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
686 uint64_t rx_offloads = eth_conf->rxmode.offloads;
687 uint32_t checksum = L3_CKSUM | L4_CKSUM;
689 PMD_INIT_FUNC_TRACE();
691 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
694 max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
696 enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
697 ENETC_SET_MAXFRM(max_len));
698 enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
699 ENETC_MAC_MAXFRM_SIZE);
700 enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
701 2 * ENETC_MAC_MAXFRM_SIZE);
702 dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
706 if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
709 config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
710 config |= ENETC_PM0_CRC;
711 enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
714 if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
715 checksum &= ~L3_CKSUM;
717 if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
718 checksum &= ~L4_CKSUM;
720 enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
727 enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
729 struct enetc_eth_adapter *priv =
730 ENETC_DEV_PRIVATE(dev->data->dev_private);
731 struct enetc_bdr *rx_ring;
734 rx_ring = dev->data->rx_queues[qidx];
735 if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
736 rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
738 rx_data = rx_data | ENETC_RBMR_EN;
739 enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
741 dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
748 enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
750 struct enetc_eth_adapter *priv =
751 ENETC_DEV_PRIVATE(dev->data->dev_private);
752 struct enetc_bdr *rx_ring;
755 rx_ring = dev->data->rx_queues[qidx];
756 if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
757 rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
759 rx_data = rx_data & (~ENETC_RBMR_EN);
760 enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
762 dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
769 enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
771 struct enetc_eth_adapter *priv =
772 ENETC_DEV_PRIVATE(dev->data->dev_private);
773 struct enetc_bdr *tx_ring;
776 tx_ring = dev->data->tx_queues[qidx];
777 if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
778 tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
780 tx_data = tx_data | ENETC_TBMR_EN;
781 enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
783 dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
790 enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
792 struct enetc_eth_adapter *priv =
793 ENETC_DEV_PRIVATE(dev->data->dev_private);
794 struct enetc_bdr *tx_ring;
797 tx_ring = dev->data->tx_queues[qidx];
798 if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
799 tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
801 tx_data = tx_data & (~ENETC_TBMR_EN);
802 enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
804 dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
811 * The set of PCI devices this driver supports
813 static const struct rte_pci_id pci_id_enetc_map[] = {
814 { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
815 { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
816 { .vendor_id = 0, /* sentinel */ },
819 /* Features supported by this driver */
820 static const struct eth_dev_ops enetc_ops = {
821 .dev_configure = enetc_dev_configure,
822 .dev_start = enetc_dev_start,
823 .dev_stop = enetc_dev_stop,
824 .dev_close = enetc_dev_close,
825 .link_update = enetc_link_update,
826 .stats_get = enetc_stats_get,
827 .stats_reset = enetc_stats_reset,
828 .promiscuous_enable = enetc_promiscuous_enable,
829 .promiscuous_disable = enetc_promiscuous_disable,
830 .allmulticast_enable = enetc_allmulticast_enable,
831 .allmulticast_disable = enetc_allmulticast_disable,
832 .dev_infos_get = enetc_dev_infos_get,
833 .mtu_set = enetc_mtu_set,
834 .rx_queue_setup = enetc_rx_queue_setup,
835 .rx_queue_start = enetc_rx_queue_start,
836 .rx_queue_stop = enetc_rx_queue_stop,
837 .rx_queue_release = enetc_rx_queue_release,
838 .tx_queue_setup = enetc_tx_queue_setup,
839 .tx_queue_start = enetc_tx_queue_start,
840 .tx_queue_stop = enetc_tx_queue_stop,
841 .tx_queue_release = enetc_tx_queue_release,
842 .dev_supported_ptypes_get = enetc_supported_ptypes_get,
846 * Initialisation of the enetc device
849 * - Pointer to the structure rte_eth_dev
852 * - On success, zero.
853 * - On failure, negative value.
856 enetc_dev_init(struct rte_eth_dev *eth_dev)
859 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
860 struct enetc_eth_hw *hw =
861 ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
863 PMD_INIT_FUNC_TRACE();
864 eth_dev->dev_ops = &enetc_ops;
865 eth_dev->rx_pkt_burst = &enetc_recv_pkts;
866 eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
868 /* Retrieving and storing the HW base address of device */
869 hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
870 hw->device_id = pci_dev->id.device_id;
872 error = enetc_hardware_init(hw);
874 ENETC_PMD_ERR("Hardware initialization failed");
878 /* Allocate memory for storing MAC addresses */
879 eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
880 RTE_ETHER_ADDR_LEN, 0);
881 if (!eth_dev->data->mac_addrs) {
882 ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
883 "store MAC addresses",
884 RTE_ETHER_ADDR_LEN * 1);
889 /* Copy the permanent MAC address */
890 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
891 ð_dev->data->mac_addrs[0]);
894 enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
895 ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
896 eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
899 ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
900 eth_dev->data->port_id, pci_dev->id.vendor_id,
901 pci_dev->id.device_id);
906 enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
908 PMD_INIT_FUNC_TRACE();
913 enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
914 struct rte_pci_device *pci_dev)
916 return rte_eth_dev_pci_generic_probe(pci_dev,
917 sizeof(struct enetc_eth_adapter),
922 enetc_pci_remove(struct rte_pci_device *pci_dev)
924 return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
927 static struct rte_pci_driver rte_enetc_pmd = {
928 .id_table = pci_id_enetc_map,
929 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
930 .probe = enetc_pci_probe,
931 .remove = enetc_pci_remove,
/* Register the driver, its PCI id table and its kernel-module dependency */
RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
938 RTE_INIT(enetc_pmd_init_log)
940 enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
941 if (enetc_logtype_pmd >= 0)
942 rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);