/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_rxtx.h"

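/* Maximum on-wire frame length for the current MTU.  Worked example
 * (illustrative): with the default 1500-byte MTU this is
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (one VLAN tag) = 1522 bytes.
 */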
static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
	return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
		RTE_ETHER_CRC_LEN + VLAN_HLEN;
}

static int mdio_complete(struct axgbe_port *pdata)
{
	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
		return 1;

	return 0;
}

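/* One MDIO transaction to an external PHY: the port/device and register
 * addresses are programmed into MAC_MDIOSCAR, then MAC_MDIOSCCDR is written
 * with the command (CMD = 1 for write, CMD = 3 for read) and BUSY set to
 * start the transfer.  Hardware clears BUSY on completion; the functions
 * below poll for that with mdio_complete() for up to one second of timer
 * cycles.
 */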
static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
				    int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			return 0;
	}

	PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
	return -ETIMEDOUT;
}

static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
				   int reg)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();

	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			goto success;
	}

	PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
	return -ETIMEDOUT;

success:
	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
				  enum axgbe_mdio_mode mode)
{
	unsigned int reg_val = 0;

	switch (mode) {
	case AXGBE_MDIO_MODE_CL22:
		if (port > AXGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case AXGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}
	AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}

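/* Worked example of the windowed MMD access below (illustrative; the window
 * mask is device-specific, 0x1fff is assumed here): reading register 0x0010
 * of MMD 3 gives mmd_address = (3 << 16) | 0x0010 = 0x30010, or 0x60020
 * after the left shift.  Then index = 0x60020 & ~0x1fff = 0x60000 is written
 * to the window-select register and the 16-bit value is read from
 * offset = xpcs_window + (0x60020 & 0x1fff) = xpcs_window + 0x20.
 */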
static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
				  int prtad __rte_unused, int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);

	pthread_mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
				    int prtad __rte_unused,
				    int mmd_reg, int mmd_data)
{
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);

	pthread_mutex_unlock(&pdata->xpcs_mutex);
}

static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
			       int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return -1;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
				 int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}

static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

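/* The pause time programmed below (PT = 0xffff) is expressed in pause
 * quanta; one quantum is 512 bit times (IEEE 802.3x), so at 10 Gb/s this
 * asks the link partner to pause for roughly 65535 * 512 / 10^10 s, about
 * 3.4 ms per pause frame.
 */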
static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		/* Flow control thresholds are established */
		if (pdata->rx_rfd[i])
			ehfc = 1;

		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n",
			    ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

		/* Set pause time */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->tx_pause)
		axgbe_enable_tx_flow_control(pdata);
	else
		axgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->rx_pause)
		axgbe_enable_rx_flow_control(pdata);
	else
		axgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
	axgbe_config_tx_flow_control(pdata);
	axgbe_config_rx_flow_control(pdata);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}

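/* RFA/RFD are the fill levels (counted down from a full fifo) at which the
 * hardware activates and deactivates flow control.  Worked example
 * (illustrative, assuming AXGMAC_FLOW_CONTROL_UNIT is 512 bytes and the
 * usual "value = bytes / 512 - 2" register encoding): with a 16384-byte
 * queue fifo and a 1522-byte max frame aligned up to frame_fifo_size = 1536,
 * the final else branch yields rfa = 2 * 1536 + 512 = 3584 and
 * rfd = 3584 + 1536 = 5120, which encode to RFA = 5 and RFD = 8.
 */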
static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
					       unsigned int queue,
					       unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += AXGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
}

static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;

		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
					pdata->rx_rfa[i]);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
					pdata->rx_rfd[i]);
	}
}

static int __axgbe_exit(struct axgbe_port *pdata)
{
	unsigned int count = 2000;
	int ret = 0;

	/* Issue a software reset */
	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	rte_delay_us(10);

	/* Poll Until Poll Condition */
	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		rte_delay_us(500);

	if (!count)
		ret = -EBUSY;

	return ret;
}

static int axgbe_exit(struct axgbe_port *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __axgbe_exit(pdata);
	if (ret)
		return ret;

	return __axgbe_exit(pdata);
}

static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
{
	unsigned int i, count;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
							 MTL_Q_TQOMR, FTQ))
			rte_delay_us(500);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
	/* Set enhanced addressing mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Outstanding read/write requests */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

	/* Set the System Bus mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}

static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
	unsigned int arcache, awcache, arwcache;

	arcache = 0;
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

	arwcache = 0;
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}

static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}

static int axgbe_config_osp_mode(struct axgbe_port *pdata)
{
	/* Force DMA to operate on second packet before closing descriptors
	 * of first packet
	 */
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
					pdata->tx_osp_mode);
	}

	return 0;
}

static int axgbe_config_pblx8(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
					pdata->pblx8);
	}

	return 0;
}

static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
					pdata->tx_pbl);
	}

	return 0;
}

static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
					pdata->rx_pbl);
	}

	return 0;
}

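/* Worked example of the Rx buffer sizing below (illustrative, assuming
 * AXGBE_RX_BUF_ALIGN is 64): a mempool with a 2000-byte data room leaves
 * 2000 - 128 (default RTE_PKTMBUF_HEADROOM) = 1872 usable bytes, which is
 * rounded up to 1920 before being programmed as the DMA channel's RBSZ.
 */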
static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM;
		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
			~(AXGBE_RX_BUF_ALIGN - 1);

		if (rxq->buf_size > pdata->rx_buf_size)
			pdata->rx_buf_size = rxq->buf_size;

		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
					rxq->buf_size);
	}
}

static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
			       unsigned int index, unsigned int val)
{
	unsigned int wait;

	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
		return -EBUSY;

	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			return 0;

		rte_delay_us(1500);
	}

	return -EBUSY;
}

int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key;
	int ret;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;

	if (!rss_conf->rss_key)
		key = (unsigned int *)&pdata->rss_key;
	else
		key = (unsigned int *)rss_conf->rss_key;

	while (key_regs--) {
		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
					  key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = axgbe_write_rss_reg(pdata,
					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
					  pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int axgbe_enable_rss(struct axgbe_port *pdata)
{
	int ret;

	/* Program the hash key */
	ret = axgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static void axgbe_rss_options(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	uint64_t rss_hf;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	pdata->rss_hf = rss_conf->rss_hf;
	rss_hf = rss_conf->rss_hf;

	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}

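/* Example of the resulting lookup table (illustrative): with 4 Rx queues
 * the DMCH field of successive table entries is 0, 1, 2, 3, 0, 1, ... so
 * hash buckets are spread round-robin across the DMA channels.
 */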
static int axgbe_config_rss(struct axgbe_port *pdata)
{
	uint32_t i;

	if (pdata->rss_enable) {
		/* Initialize RSS hash key and lookup table */
		uint32_t *key = (uint32_t *)pdata->rss_key;

		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
			*key++ = (uint32_t)rte_rand();
		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
					i % pdata->eth_dev->data->nb_rx_queues);
		axgbe_rss_options(pdata);
		if (axgbe_enable_rss(pdata)) {
			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
			return -1;
		}
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
	}

	return 0;
}

static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];

		/* Clear all the interrupts which are set */
		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 * NIE  - Normal Interrupt Summary Enable
		 * AIE  - Abnormal Interrupt Summary Enable
		 * FBEE - Fatal Bus Error Enable
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		/* Enable following Rx interrupts
		 * RBUE - Receive Buffer Unavailable Enable
		 * RIE  - Receive Interrupt Enable (unless using
		 *        per channel interrupts in edge triggered
		 *        mode)
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);

		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
	}
}

static void wrapper_tx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		txq->cur = 0;
		txq->dirty = 0;
		/* Update the total number of Tx descriptors */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
				   high32_value(txq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
				   low32_value(txq->ring_phys_addr));
	}
}

static int wrapper_rx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	struct rte_mbuf *mbuf;
	volatile union axgbe_rx_desc *desc;
	unsigned int i, j;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		/* Initialize software ring entries */
		rxq->mbuf_alloc = 0;
		rxq->cur = 0;
		rxq->dirty = 0;
		desc = AXGBE_GET_DESC_PT(rxq, 0);

		for (j = 0; j < rxq->nb_desc; j++) {
			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (mbuf == NULL) {
				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
					    (unsigned int)rxq->queue_id, j);
				axgbe_dev_rx_queue_release(rxq);
				return -ENOMEM;
			}
			rxq->sw_ring[j] = mbuf;
			/* Mbuf populate */
			mbuf->next = NULL;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->nb_segs = 1;
			mbuf->port = rxq->port_id;
			desc->read.baddr =
				rte_cpu_to_le_64(
					rte_mbuf_data_iova_default(mbuf));
			rte_wmb();
			AXGMAC_SET_BITS_LE(desc->read.desc3,
					   RX_NORMAL_DESC3, OWN, 1);
			rte_wmb();
			rxq->mbuf_alloc++;
			desc++;
		}
		/* Update the total number of Rx descriptors */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
				   rxq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
				   high32_value(rxq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
				   low32_value(rxq->ring_phys_addr));
		/* Update the Rx Descriptor Tail Pointer */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
					       (rxq->nb_desc - 1) *
					       sizeof(union axgbe_rx_desc)));
	}
	return 0;
}

static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					MTL_TSA_ETS);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

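/* Worked example of the fifo partitioning below (illustrative, assuming
 * AXGMAC_FIFO_UNIT is 256 bytes): a 65536-byte Rx fifo split across 4
 * queues gives q_fifo_size = 16384, so RQS is programmed with
 * 16384 / 256 - 1 = 63.
 */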
/* Distributing fifo size */
static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
			    pdata->hw_feat.rx_fifo_size);
	q_fifo_size = fifo_size / pdata->rx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1)
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
	pdata->fifo = p_fifo;

	/* Calculate and configure flow control thresholds */
	axgbe_calculate_flow_control_threshold(pdata);
	axgbe_config_flow_control_threshold(pdata);

	PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n",
		    pdata->rx_q_count, q_fifo_size);
}

static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
			    pdata->hw_feat.tx_fifo_size);
	q_fifo_size = fifo_size / pdata->tx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1)
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);

	PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n",
		    pdata->tx_q_count, q_fifo_size);
}

static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
						Q2TCMAP, i);
			queue++;
		}
		if (i < qptc_extra) {
			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
						Q2TCMAP, i);
			queue++;
		}
	}

	if (pdata->rss_enable) {
		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
		reg = MTL_RQDCM0R;
		reg_val = 0;
		for (i = 0; i < pdata->rx_q_count;) {
			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

			if ((i % MTL_RQDCM_Q_PER_REG) &&
			    (i != pdata->rx_q_count))
				continue;

			AXGMAC_IOWRITE(pdata, reg, reg_val);

			reg += MTL_RQDCM_INC;
			reg_val = 0;
		}
	}
}

static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

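/* Reverse the bit order of a 32-bit word by swapping halves, bytes, nibbles,
 * bit pairs and finally single bits, e.g. bitrev32(0x00000001) = 0x80000000.
 */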
static uint32_t bitrev32(uint32_t x)
{
	x = (x >> 16) | (x << 16);
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	return x;
}

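/* Bitwise little-endian CRC-32 using the reflected IEEE 802.3 polynomial
 * 0xedb88320, processing one input byte (8 shift/xor steps) per iteration.
 * Combined as bitrev32(~crc32_le(~0, addr, len)) below, it reproduces the
 * CRC the MAC computes for hash filtering.
 */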
static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

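/* Worked example (illustrative): if the shifted CRC is 0x53 (83), then
 * htable_index = 83 >> 5 = 2 selects MAC_HTR2 and
 * htable_bitmask = 1 << (83 & 0x1f) = 1 << 19 selects bit 19 within it.
 */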
void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add)
{
	uint32_t crc, htable_index, htable_bitmask;

	crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN));
	crc >>= pdata->hash_table_shift;
	htable_index = crc >> 5;
	htable_bitmask = 1 << (crc & 0x1f);

	if (add) {
		pdata->uc_hash_table[htable_index] |= htable_bitmask;
		pdata->uc_hash_mac_addr++;
	} else {
		pdata->uc_hash_table[htable_index] &= ~htable_bitmask;
		pdata->uc_hash_mac_addr--;
	}
	PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n",
		    add ? "set" : "clear", (crc & 0x1f), htable_index);

	AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index),
		       pdata->uc_hash_table[htable_index]);
}

void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (addr) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = addr[0];
		mac_addr[1] = addr[1];
		mac_addr[2] = addr[2];
		mac_addr[3] = addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = addr[4];
		mac_addr[1] = addr[5];

		/* Address Enable: Use this Addr for Perfect Filtering */
		AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n",
		    addr ? "set" : "clear", index);

	AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi);
	AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo);
}

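/* Register layout example (derived from the code below): for the MAC
 * address 00:11:22:33:44:55, mac_addr_lo = 0x33221100 holds the first four
 * bytes and mac_addr_hi = 0x00005544 the last two.
 */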
static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | (addr[0] << 0);

	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static void axgbe_config_mac_hash_table(struct axgbe_port *pdata)
{
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	pdata->hash_table_shift = 0;
	pdata->hash_table_count = 0;
	pdata->uc_hash_mac_addr = 0;
	memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table));

	if (hw_feat->hash_table_size) {
		pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7);
		pdata->hash_table_count = hw_feat->hash_table_size / 32;
	}
}

static void axgbe_config_mac_address(struct axgbe_port *pdata)
{
	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
}

static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
{
	unsigned int val;

	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void axgbe_config_mac_speed(struct axgbe_port *pdata)
{
	axgbe_set_speed(pdata, pdata->phy_speed);
}

static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
{
	if (pdata->rx_csum_enable)
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
	else
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
}

static void axgbe_config_mmc(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	/* Set counters to reset on read */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

static int axgbe_init(struct axgbe_port *pdata)
{
	int ret;

	/* Flush Tx queues */
	ret = axgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;
	/* Initialize DMA related features */
	axgbe_config_dma_bus(pdata);
	axgbe_config_dma_cache(pdata);
	axgbe_config_edma_control(pdata);
	axgbe_config_osp_mode(pdata);
	axgbe_config_pblx8(pdata);
	axgbe_config_tx_pbl_val(pdata);
	axgbe_config_rx_pbl_val(pdata);
	axgbe_config_rx_buffer_size(pdata);
	axgbe_config_rss(pdata);
	wrapper_tx_desc_init(pdata);
	ret = wrapper_rx_desc_init(pdata);
	if (ret)
		return ret;
	axgbe_enable_dma_interrupts(pdata);

	/* Initialize MTL related features */
	axgbe_config_mtl_mode(pdata);
	axgbe_config_queue_mapping(pdata);
	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	axgbe_config_tx_fifo_size(pdata);
	axgbe_config_rx_fifo_size(pdata);

	axgbe_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features */
	axgbe_config_mac_hash_table(pdata);
	axgbe_config_mac_address(pdata);
	axgbe_config_jumbo_enable(pdata);
	axgbe_config_flow_control(pdata);
	axgbe_config_mac_speed(pdata);
	axgbe_config_checksum_offload(pdata);
	axgbe_config_mmc(pdata);

	return 0;
}

void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
{
	hw_if->exit = axgbe_exit;
	hw_if->config_flow_control = axgbe_config_flow_control;

	hw_if->init = axgbe_init;

	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
	hw_if->write_mmd_regs = axgbe_write_mmd_regs;

	hw_if->set_speed = axgbe_set_speed;

	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;

	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
}

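/* Usage sketch (illustrative, not part of this file): the ethdev layer
 * populates this ops table once at probe time and then drives the hardware
 * through it, e.g.:
 *
 *	axgbe_init_function_ptrs_dev(&pdata->hw_if);
 *	if (pdata->hw_if.init(pdata))
 *		return -EIO;
 */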