/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_rxtx.h"

static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
	return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
		RTE_ETHER_CRC_LEN + VLAN_HLEN;
}

/* Return 1 when the previous MDIO command has completed (BUSY bit clear) */
static int mdio_complete(struct axgbe_port *pdata)
{
	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
		return 1;

	return 0;
}

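/* Write an external PHY register through the MDIO single-command interface:
 * the target device/register go into MAC_MDIOSCAR, the data and a write
 * command (CMD = 1) into MAC_MDIOSCCDR, and completion is polled for up to
 * one second.
 */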
static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
				    int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			return 0;
	}

	PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
	return -ETIMEDOUT;
}

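/* Read an external PHY register: same MAC_MDIOSCAR/MAC_MDIOSCCDR sequence as
 * the write path, but with the read command (CMD = 3); the result is taken
 * from the DATA field of MAC_MDIOSCCDR once the BUSY bit clears.
 */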
static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
				   int reg)
{
	unsigned int mdio_sca, mdio_sccd;
	uint64_t timeout;

	mdio_sca = 0;
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	timeout = rte_get_timer_cycles() + rte_get_timer_hz();

	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			goto success;
	}

	PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
	return -ETIMEDOUT;

success:
	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

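/* Select clause 22 or clause 45 MDIO framing for the given port: CL22 sets
 * the corresponding port bit in MAC_MDIOCL22R, CL45 leaves the register
 * cleared.
 */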
static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
				  enum axgbe_mdio_mode mode)
{
	unsigned int reg_val = 0;

	switch (mode) {
	case AXGBE_MDIO_MODE_CL22:
		if (port > AXGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case AXGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}
	AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}

static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
				  int prtad __rte_unused, int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);

	pthread_mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
				    int prtad __rte_unused,
				    int mmd_reg, int mmd_data)
{
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);

	pthread_mutex_unlock(&pdata->xpcs_mutex);
}

static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
			       int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return -1;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
				 int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}

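/* Program the MAC_TCR speed select (SS) field. The encoding below follows the
 * Synopsys XGMAC convention (0x0 = 10Gbps, 0x2 = 2.5Gbps, 0x3 = 1Gbps); the
 * field is only rewritten when the current value differs.
 */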
static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count,
			  max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		/* Flow control thresholds are established */
		if (pdata->rx_rfd[i])
			ehfc = 1;

		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
	}

	/* Set MAC flow control */
	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = RTE_MIN(pdata->tx_q_count,
			  max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->tx_pause)
		axgbe_enable_tx_flow_control(pdata);
	else
		axgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->rx_pause)
		axgbe_enable_rx_flow_control(pdata);
	else
		axgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
	axgbe_config_tx_flow_control(pdata);
	axgbe_config_rx_flow_control(pdata);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}

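/* Derive the per-queue Rx flow-control activate (RFA) and deactivate (RFD)
 * thresholds from the queue FIFO size relative to the worst-case frame size:
 * small FIFOs get fixed values (or no flow control at all), larger ones are
 * computed in flow-control units below the FIFO-full level.
 */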
static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
					       unsigned int queue,
					       unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += AXGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
}

static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;

		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
					pdata->rx_rfa[i]);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
					pdata->rx_rfd[i]);
	}
}

static int __axgbe_exit(struct axgbe_port *pdata)
{
	unsigned int count = 2000;

	/* Issue a software reset */
	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	rte_delay_us(10);

	/* Poll Until Poll Condition */
	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		rte_delay_us(500);

	if (!count)
		return -EBUSY;

	return 0;
}

static int axgbe_exit(struct axgbe_port *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __axgbe_exit(pdata);
	if (ret)
		return ret;

	return __axgbe_exit(pdata);
}

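/* Flush all MTL Tx queues and wait for each flush to complete. MAC versions
 * older than 2.1 do not support the flush, so this is a no-op for them.
 */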
static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
{
	unsigned int i, count;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
							 MTL_Q_TQOMR, FTQ))
			rte_delay_us(500);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
	/* Set enhanced addressing mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Outstanding read/write requests */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

	/* Set the System Bus mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}

static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
	unsigned int arcache, awcache, arwcache;

	arcache = 0;
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

	arwcache = 0;
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}

static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}

static int axgbe_config_osp_mode(struct axgbe_port *pdata)
{
	/* Force DMA to operate on second packet before closing descriptors
	 * of first packet
	 */
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
					pdata->tx_osp_mode);
	}

	return 0;
}

static int axgbe_config_pblx8(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
					pdata->pblx8);
	}

	return 0;
}

static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
					pdata->tx_pbl);
	}

	return 0;
}

static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
					pdata->rx_pbl);
	}

	return 0;
}

static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM;
		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
			~(AXGBE_RX_BUF_ALIGN - 1);

		if (rxq->buf_size > pdata->rx_buf_size)
			pdata->rx_buf_size = rxq->buf_size;

		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
					rxq->buf_size);
	}
}

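/* Indirect write of one RSS hash-key or lookup-table word: the value goes into
 * MAC_RSSDR, the target type and index into MAC_RSSAR, and the operation-busy
 * (OB) bit is polled until the MAC has consumed the command.
 */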
static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
			       unsigned int index, unsigned int val)
{
	unsigned int wait;

	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
		return -EBUSY;

	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			return 0;

		rte_delay_us(1000);
	}

	return -EBUSY;
}

static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key;
	int ret;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;

	if (!rss_conf->rss_key)
		key = (unsigned int *)&pdata->rss_key;
	else
		key = (unsigned int *)rss_conf->rss_key;

	while (key_regs--) {
		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
					  key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = axgbe_write_rss_reg(pdata,
					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
					  pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int axgbe_enable_rss(struct axgbe_port *pdata)
{
	int ret;

	/* Program the hash key */
	ret = axgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

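/* Translate the rte_eth RSS hash flags configured by the application into the
 * IP/TCP/UDP hash-enable bits of MAC_RSSCR, cached in pdata->rss_options.
 */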
static void axgbe_rss_options(struct axgbe_port *pdata)
{
	struct rte_eth_rss_conf *rss_conf;
	uint64_t rss_hf;

	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	rss_hf = rss_conf->rss_hf;

	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}

static int axgbe_config_rss(struct axgbe_port *pdata)
{
	uint32_t i;

	if (pdata->rss_enable) {
		/* Initialize RSS hash key and lookup table */
		uint32_t *key = (uint32_t *)pdata->rss_key;

		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
			*key++ = (uint32_t)rte_rand();
		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
					i % pdata->eth_dev->data->nb_rx_queues);
		axgbe_rss_options(pdata);
		if (axgbe_enable_rss(pdata)) {
			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
			return -1;
		}
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
	}

	return 0;
}

static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];

		/* Clear all the interrupts which are set */
		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		/* Enable following Rx interrupts
		 *   RBUE - Receive Buffer Unavailable Enable
		 *   RIE  - Receive Interrupt Enable (unless using
		 *          per channel interrupts in edge triggered
		 *          mode)
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);

		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
	}
}

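/* Program each Tx DMA channel with its descriptor ring length and the
 * high/low halves of the ring's physical base address.
 */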
static void wrapper_tx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		txq->cur = 0;
		txq->dirty = 0;
		/* Update the total number of Tx descriptors */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
				   high32_value(txq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
				   low32_value(txq->ring_phys_addr));
	}
}

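/* Populate every Rx descriptor with a freshly allocated mbuf, hand descriptor
 * ownership to the hardware, and program each Rx DMA channel's ring length,
 * base address and tail pointer.
 */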
static int wrapper_rx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	struct rte_mbuf *mbuf;
	volatile union axgbe_rx_desc *desc;
	unsigned int i, j;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		/* Initialize software ring entries */
		rxq->mbuf_alloc = 0;
		rxq->cur = 0;
		rxq->dirty = 0;
		desc = AXGBE_GET_DESC_PT(rxq, 0);

		for (j = 0; j < rxq->nb_desc; j++) {
			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (mbuf == NULL) {
				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
					    (unsigned int)rxq->queue_id, j);
				axgbe_dev_rx_queue_release(rxq);
				return -ENOMEM;
			}
			rxq->sw_ring[j] = mbuf;
			/* Mbuf populate */
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->port = rxq->port_id;
			desc->read.baddr =
				rte_cpu_to_le_64(
					rte_mbuf_data_iova_default(mbuf));
			rte_wmb();
			AXGMAC_SET_BITS_LE(desc->read.desc3,
					   RX_NORMAL_DESC3, OWN, 1);
			rte_wmb();
			rxq->mbuf_alloc++;
			desc++;
		}
		/* Update the total number of Rx descriptors */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
				   rxq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
				   high32_value(rxq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
				   low32_value(rxq->ring_phys_addr));
		/* Update the Rx Descriptor Tail Pointer */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
					       (rxq->nb_desc - 1) *
					       sizeof(union axgbe_rx_desc)));
	}

	return 0;
}

static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					MTL_TSA_ETS);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
				     unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

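/* Example of the per-queue FIFO math used below, assuming AXGMAC_FIFO_UNIT is
 * 256 bytes: a 65536-byte Rx FIFO shared by 4 queues gives 16384 bytes per
 * queue, i.e. 16384 / 256 = 64 allocation units, programmed as RQS = 63 since
 * the register field is zero-based.
 */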
/* Distributing FIFO size */
static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
			    pdata->hw_feat.rx_fifo_size);
	q_fifo_size = fifo_size / pdata->rx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1).
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
	pdata->fifo = p_fifo;

	/* Calculate and configure the flow control thresholds */
	axgbe_calculate_flow_control_threshold(pdata);
	axgbe_config_flow_control_threshold(pdata);
}

static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
{
	unsigned int fifo_size;
	unsigned int q_fifo_size;
	unsigned int p_fifo, i;

	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
			    pdata->hw_feat.tx_fifo_size);
	q_fifo_size = fifo_size / pdata->tx_q_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result
	 * by 1).
	 */
	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
}

static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 *   Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++)
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
						Q2TCMAP, i);
		if (i < qptc_extra)
			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
						Q2TCMAP, i);
	}

	if (pdata->rss_enable) {
		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
		reg = MTL_RQDCM0R;
		reg_val = 0;
		for (i = 0; i < pdata->rx_q_count;) {
			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

			if ((i % MTL_RQDCM_Q_PER_REG) &&
			    (i != pdata->rx_q_count))
				continue;

			AXGMAC_IOWRITE(pdata, reg, reg_val);

			reg += MTL_RQDCM_INC;
			reg_val = 0;
		}
	}
}

static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

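/* The station address is split across two registers, low-order bytes first:
 * for example, 00:11:22:33:44:55 (addr[0] = 0x00 ... addr[5] = 0x55) is
 * written as MAC_MACA0HR = 0x5544 and MAC_MACA0LR = 0x33221100.
 */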
static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		(addr[1] << 8) | (addr[0] << 0);

	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static void axgbe_config_mac_address(struct axgbe_port *pdata)
{
	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
}

static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
{
	unsigned int val;

	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void axgbe_config_mac_speed(struct axgbe_port *pdata)
{
	axgbe_set_speed(pdata, pdata->phy_speed);
}

static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
{
	if (pdata->rx_csum_enable)
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
	else
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
}

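/* One-time hardware bring-up called at device start: flush the Tx queues,
 * then configure the DMA, MTL and MAC blocks in that order.
 */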
static int axgbe_init(struct axgbe_port *pdata)
{
	int ret;

	/* Flush Tx queues */
	ret = axgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/* Initialize DMA related features */
	axgbe_config_dma_bus(pdata);
	axgbe_config_dma_cache(pdata);
	axgbe_config_edma_control(pdata);
	axgbe_config_osp_mode(pdata);
	axgbe_config_pblx8(pdata);
	axgbe_config_tx_pbl_val(pdata);
	axgbe_config_rx_pbl_val(pdata);
	axgbe_config_rx_buffer_size(pdata);
	axgbe_config_rss(pdata);
	wrapper_tx_desc_init(pdata);
	ret = wrapper_rx_desc_init(pdata);
	if (ret)
		return ret;
	axgbe_enable_dma_interrupts(pdata);

	/* Initialize MTL related features */
	axgbe_config_mtl_mode(pdata);
	axgbe_config_queue_mapping(pdata);
	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	axgbe_config_tx_fifo_size(pdata);
	axgbe_config_rx_fifo_size(pdata);

	axgbe_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features */
	axgbe_config_mac_address(pdata);
	axgbe_config_jumbo_enable(pdata);
	axgbe_config_flow_control(pdata);
	axgbe_config_mac_speed(pdata);
	axgbe_config_checksum_offload(pdata);

	return 0;
}

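/* Populate the hw_if ops table with the device-level handlers defined above */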
void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
{
	hw_if->exit = axgbe_exit;
	hw_if->config_flow_control = axgbe_config_flow_control;

	hw_if->init = axgbe_init;

	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
	hw_if->write_mmd_regs = axgbe_write_mmd_regs;

	hw_if->set_speed = axgbe_set_speed;

	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;

	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
}