1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2 /* Copyright (C) 2014-2017 aQuantia Corporation. */
4 /* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
6 #include "../atl_types.h"
9 #include "../atl_hw_regs.h"
10 #include "hw_atl_utils.h"
11 #include "hw_atl_llh.h"
12 #include "hw_atl_b0_internal.h"
13 #include "hw_atl_llh_internal.h"
14 #include "../atl_logs.h"
/* hw_atl_b0_hw_reset() - soft-reset the MAC and notify firmware.
 * NOTE(review): this chunk is sampled; body braces and the error
 * handling between the two visible calls are not shown here.
 */
16 int hw_atl_b0_hw_reset(struct aq_hw_s *self)
/* Firmware-assisted soft reset of the whole device. */
20 	err = hw_atl_utils_soft_reset(self);
/* Move the firmware MPI state machine into RESET. */
24 	self->aq_fw_ops->set_state(self, MPI_RESET);
/* hw_atl_b0_hw_qos_set() - one-time QoS init:
 * TPS (Tx packet scheduler) rates and TC credits, per-TC Tx/Rx packet
 * buffer sizes with hi/lo flow-control thresholds, and the 802.1p
 * priority -> traffic-class map (all 8 priorities mapped to TC 0).
 */
29 static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
33 unsigned int i_priority = 0U;
35 /* TPS Descriptor rate init */
36 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
37 hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* Round-robin arbitration (mode 0) for VM, descriptor and data paths. */
40 hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
42 /* TPS TC credits init */
43 hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
44 hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
/* Credits/weights are programmed for TC 0 only. */
46 hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
47 hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
48 hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
49 hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* QoS Tx buf size per TC: whole Tx buffer given to one TC. */
52 buff_size = HW_ATL_B0_TXBUF_MAX;
54 hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
55 hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
59 hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
64 /* QoS Rx buf size per TC */
66 buff_size = HW_ATL_B0_RXBUF_MAX;
68 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
/* Hi/lo watermarks ~66%/50% of the buffer; the surrounding divisor
 * lines are not visible in this sampled chunk — confirm in full file.
 */
69 hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
71 (1024U / 32U) * 66U) /
73 hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
75 (1024U / 32U) * 50U) /
/* XOFF (Rx pause generation) disabled for this TC. */
77 hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
79 /* QoS 802.1p priority -> TC mapping: every priority to TC 0. */
80 for (i_priority = 8U; i_priority--;)
81 hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
83 return aq_hw_err_from_flags(self);
/* RSS hash-type control bits for the RPF RSS control register. */
86 /* calc hash only in IPv4 header, regardless of presence of TCP */
87 #define pif_rpf_rss_ipv4_hdr_only_i (1 << 4)
88 /* calc hash only if TCP header and IPv4 */
89 #define pif_rpf_rss_ipv4_tcp_hdr_only_i (1 << 3)
90 /* calc hash only in IPv6 header, regardless of presence of TCP */
91 #define pif_rpf_rss_ipv6_hdr_only_i (1 << 2)
92 /* calc hash only if TCP header and IPv6 */
93 #define pif_rpf_rss_ipv6_tcp_hdr_only_i (1 << 1)
94 /* bug 5124 - rss hashing types - FIXME */
95 #define pif_rpf_rss_dont_use_udp_i (1 << 0)
/* hw_atl_b0_hw_rss_hash_type_set() - program which header fields feed
 * the RSS hash.  On RPF2-capable chips the upper control bits
 * (0x000F0000) are set as well.
 */
97 static int hw_atl_b0_hw_rss_hash_type_set(struct aq_hw_s *self)
100 unsigned int control_reg_val =
101 IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U;
103 /* RSS hash type set for IP/TCP */
104 control_reg_val |= pif_rpf_rss_ipv4_hdr_only_i;
/* 0x5040 is written directly; presumably the RPF RSS hash-type
 * control register — confirm against register headers.
 */
106 aq_hw_write_reg(self, 0x5040U, control_reg_val);
108 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_rss_hash_set() - load the 40-byte RSS (Toeplitz) secret
 * key into hardware as ten 32-bit words, then select the RSS ring mode
 * and hash types.  When RSS is disabled the key is zeroed.
 */
111 int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
112 struct aq_rss_parameters *rss_params)
114 struct aq_hw_cfg_s *cfg = self->aq_nic_cfg;
117 unsigned int addr = 0U;
/* NOTE(review): i counts down while addr counts up, so key word [9]
 * lands at address 0 — i.e. the key is written in reverse word order.
 * Verify this matches what the hardware expects for Toeplitz hashing.
 */
119 for (i = 10, addr = 0U; i--; ++addr) {
/* Key words are byte-swapped to network order before writing. */
120 u32 key_data = cfg->is_rss ?
121 htonl(rss_params->hash_secret_key[i]) : 0U;
122 hw_atl_rpf_rss_key_wr_data_set(self, key_data);
123 hw_atl_rpf_rss_key_addr_set(self, addr);
124 hw_atl_rpf_rss_key_wr_en_set(self, 1U);
/* Poll until hardware clears the write-enable strobe. */
125 AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
131 /* RSS Ring selection */
132 hw_atl_reg_rx_flr_rss_control1set(self,
133 cfg->is_rss ? 0xB3333333U : 0x00000000U);
134 hw_atl_b0_hw_rss_hash_type_set(self);
136 err = aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_rss_set() - program the RSS indirection (redirection)
 * table.  Each table entry is reduced modulo the active RSS queue
 * count; num_rss_queues is clamped to >= 1 to avoid division by zero.
 * Entries are packed into 16-bit words before being written.
 */
143 int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
144 struct aq_rss_parameters *rss_params)
146 u8 *indirection_table = rss_params->indirection_table;
147 u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
/* Pack queue indices into val at the running bit offset 'shift';
 * the shift/flush bookkeeping lines are not visible in this chunk.
 */
154 for (i = 0; i < HW_ATL_B0_RSS_REDIRECTION_MAX; i++) {
155 val |= (u32)(indirection_table[i] % num_rss_queues) << shift;
161 hw_atl_rpf_rss_redir_tbl_wr_data_set(self, val & 0xffff);
162 hw_atl_rpf_rss_redir_tbl_addr_set(self, addr);
164 hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
/* Wait for hardware to latch the table word. */
165 AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
/* hw_atl_b0_hw_offload_set() - enable stateless offloads:
 * Tx/Rx IPv4-header and TCP/UDP checksum offload, LSO on all rings,
 * and LRO (large receive offload) configuration per ring.
 */
180 static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self)
181 /*struct aq_nic_cfg_s *aq_nic_cfg)*/
185 /* TX checksums offloads*/
186 hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
187 hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
189 /* RX checksums offloads*/
190 hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
191 hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO enabled on every ring (one bit per ring). */
194 hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
/* Encode max LRO descriptors per session: 0x3 = 8, 0x2 = 4, 0x1 = 2,
 * picked as the largest power of two below HW_ATL_B0_LRO_RXD_MAX.
 */
198 unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
199 ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
200 ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
202 for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
203 hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
/* LRO timing: time base divider, no inactive timeout, max
 * coalescing interval of 2 time-base units.
 */
205 hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
206 hw_atl_rpo_lro_inactive_interval_set(self, 0);
207 hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
209 hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
211 hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
213 hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
215 hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
217 hw_atl_rpo_lro_pkt_lim_set(self, 1U);
/* Finally enable LRO on all rings only if configured on. */
219 hw_atl_rpo_lro_en_set(self,
220 self->aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
222 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_init_tx_path() - static init of the Tx datapath:
 * TC mode, LSO TCP-flag masks, write-back IRQ moderation off,
 * TPO2 feature enable, DCA off, and short-packet padding on.
 */
226 int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
228 /* Tx TC/RSS number config */
229 hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
/* TCP flags kept in LSO segments: first/middle vs. last segment
 * differ (FIN/PSH only allowed on the last segment).
 */
231 hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
232 hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
233 hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Descriptor write-back interrupt disabled at init. */
236 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
/* Raw register 0x7040: enable extra Tx offload engine bits on
 * TPO2-capable chips — presumably a TPO2 mode switch; confirm.
 */
239 aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
240 0x00010000U : 0x00000000U);
241 hw_atl_tdm_tx_dca_en_set(self, 0U);
242 hw_atl_tdm_tx_dca_mode_set(self, 0U);
/* Insert small-packet padding on the Tx path. */
244 hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
246 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_init_rx_path() - static init of the Rx datapath:
 * TC mode, Rx flow control, RSS ring selection, L2 unicast/multicast
 * filters, VLAN ethertypes, broadcast filtering, DCA off.
 */
250 int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
252 struct aq_hw_cfg_s *cfg = self->aq_nic_cfg;
255 /* Rx TC/RSS number config */
256 hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); /* 1: 4TC/8Queues */
258 /* Rx flow control */
259 hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
261 /* RSS Ring selection */
262 hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
263 0xB3333333U : 0x00000000U);
265 /* Multicast filters: enable only unicast filter 0 (station MAC),
 * set all filters to "host" action.
 */
266 for (i = HW_ATL_B0_MAC_MAX; i--;) {
267 hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
268 hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
271 hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
272 hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Outer/inner VLAN ethertypes: 802.1ad (0x88A8) / 802.1Q (0x8100). */
275 hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
276 hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
278 /* VLAN promiscuous mode by default */
279 hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Descriptor write-back interrupt disabled at init. */
282 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
284 hw_atl_b0_hw_rss_hash_type_set(self);
286 hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
287 hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
289 hw_atl_rdm_rx_dca_en_set(self, 0U);
290 hw_atl_rdm_rx_dca_mode_set(self, 0U);
292 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_mac_addr_set() - program the station MAC address into
 * unicast filter slot HW_ATL_B0_MAC.  The filter is disabled around
 * the update to avoid matching on a half-written address.
 * NOTE(review): no NULL check on mac_addr is visible in this sampled
 * chunk — confirm the caller (or the elided lines) guarantees it.
 */
295 static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
/* h = two most-significant bytes, l = four least-significant bytes. */
305 h = (mac_addr[0] << 8) | (mac_addr[1]);
306 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
307 (mac_addr[4] << 8) | mac_addr[5];
309 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
310 hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
311 hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
312 hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
314 err = aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_init() - full device bring-up: Tx/Rx paths, MAC
 * address, link speed, QoS, RSS, PCIe request-size limits, interrupt
 * routing and offloads.  mac_addr is the station address to program.
 */
320 int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
/* Global interrupt control values indexed by [irq_type][multivec],
 * where multivec selects the second column when vecs > 1.
 */
322 static u32 aq_hw_atl_igcr_table_[4][2] = {
323 { 0x20000080U, 0x20000080U }, /* AQ_IRQ_INVALID */
324 { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
325 { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
326 { 0x200000A2U, 0x200000A6U } /* AQ_IRQ_MSIX */
332 struct aq_hw_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
334 hw_atl_b0_hw_init_tx_path(self);
335 hw_atl_b0_hw_init_rx_path(self);
337 hw_atl_b0_hw_mac_addr_set(self, mac_addr);
/* Hand the requested link-speed mask to firmware and start MPI. */
339 self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
340 self->aq_fw_ops->set_state(self, MPI_INIT);
342 hw_atl_b0_hw_qos_set(self);
343 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
344 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
346 /* Force limit MRRS on RDM/TDM to 2K */
347 val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
348 aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
349 (val & ~0x707) | 0x404);
351 /* TX DMA total request limit. B0 hardware is not capable to
352 * handle more than (8K-MRRS) incoming DMA data.
353 * Value 24 in 256byte units
355 aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
357 /* Reset link status and read out initial hardware counters */
358 self->aq_link_status.mbps = 0;
359 self->aq_fw_ops->update_stats(self);
361 err = aq_hw_err_from_flags(self);
/* Program global IRQ control per interrupt type and vector count. */
366 hw_atl_reg_irq_glb_ctl_set(self,
367 aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
368 [(aq_nic_cfg->vecs > 1U) ?
/* Auto-mask all vectors on the low status word. */
371 hw_atl_itr_irq_auto_masklsw_set(self, 0xffffffff);
/* Map generic causes; link-status cause routed to vector 3. */
374 hw_atl_reg_gen_irq_map_set(self, 0, 0);
375 hw_atl_reg_gen_irq_map_set(self, 0x80 | ATL_IRQ_CAUSE_LINK, 3);
377 hw_atl_b0_hw_offload_set(self);
/* hw_atl_b0_hw_ring_tx_start() - enable the Tx descriptor ring @index. */
383 int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, int index)
385 hw_atl_tdm_tx_desc_en_set(self, 1, index);
386 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_ring_rx_start() - enable the Rx descriptor ring @index. */
389 int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, int index)
391 hw_atl_rdm_rx_desc_en_set(self, 1, index);
392 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_start() - enable the global Tx and Rx packet buffers,
 * letting traffic flow once individual rings are started.
 */
395 int hw_atl_b0_hw_start(struct aq_hw_s *self)
397 hw_atl_tpb_tx_buff_en_set(self, 1);
398 hw_atl_rpb_rx_buff_en_set(self, 1);
399 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_tx_ring_tail_update() - write the Tx tail (doorbell)
 * pointer for ring @index, telling hardware new descriptors are ready.
 * NOTE(review): the function returns int but no return statement is
 * visible in this sampled chunk — confirm in the full file.
 */
402 int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, int tail, int index)
404 hw_atl_reg_tx_dma_desc_tail_ptr_set(self, tail, index);
/* hw_atl_b0_hw_ring_rx_init() - configure Rx descriptor ring @index:
 * DMA base address, ring length, buffer size, interrupt vector mapping
 * and DCA (disabled).  @base_addr is the ring's DMA address, @size the
 * descriptor count, @buff_size the per-descriptor data buffer in bytes.
 */
408 int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr,
409 int index, int size, int buff_size, int cpu, int vec)
411 u32 dma_desc_addr_lsw = (u32)base_addr;
412 u32 dma_desc_addr_msw = (u32)(base_addr >> 32);
/* Ring must be disabled while its registers are reprogrammed. */
414 hw_atl_rdm_rx_desc_en_set(self, false, index);
416 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index);
418 hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
421 hw_atl_reg_rx_dma_desc_base_addressmswset(self, dma_desc_addr_msw,
/* Hardware counts ring length in units of 8 descriptors. */
424 hw_atl_rdm_rx_desc_len_set(self, size / 8U, index);
/* Data buffer size is programmed in 1 KiB units. */
426 hw_atl_rdm_rx_desc_data_buff_size_set(self, buff_size / 1024U, index);
/* No header splitting, no VLAN stripping. */
428 hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, index);
429 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index);
430 hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, index);
432 /* Rx ring set mode */
434 /* Mapping interrupt vector */
435 hw_atl_itr_irq_map_rx_set(self, vec, index);
436 hw_atl_itr_irq_map_en_rx_set(self, true, index);
/* DCA target CPU recorded but DCA itself left disabled. */
438 hw_atl_rdm_cpu_id_set(self, cpu, index);
439 hw_atl_rdm_rx_desc_dca_en_set(self, 0U, index);
440 hw_atl_rdm_rx_head_dca_en_set(self, 0U, index);
441 hw_atl_rdm_rx_pld_dca_en_set(self, 0U, index);
443 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_ring_tx_init() - configure Tx descriptor ring @index:
 * DMA base address, ring length, tail reset, write-back threshold,
 * interrupt vector mapping and DCA (disabled).
 */
446 int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
447 int index, int size, int cpu, int vec)
449 u32 dma_desc_lsw_addr = (u32)base_addr;
450 u32 dma_desc_msw_addr = (u32)(base_addr >> 32);
452 hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
455 hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
/* Hardware counts ring length in units of 8 descriptors. */
458 hw_atl_tdm_tx_desc_len_set(self, size / 8U, index);
/* Start with the tail (doorbell) pointer at 0. */
460 hw_atl_b0_hw_tx_ring_tail_update(self, 0, index);
462 /* Set Tx threshold */
463 hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, index);
465 /* Mapping interrupt vector */
466 hw_atl_itr_irq_map_tx_set(self, vec, index);
467 hw_atl_itr_irq_map_en_tx_set(self, true, index);
/* DCA target CPU recorded but DCA itself left disabled. */
469 hw_atl_tdm_cpu_id_set(self, cpu, index);
470 hw_atl_tdm_tx_desc_dca_en_set(self, 0U, index);
472 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_irq_enable() - unmask the interrupts selected by the
 * low 32 bits of @mask (only the LSW is written on B0).
 */
475 int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
477 hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
478 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_irq_disable() - mask and clear any pending status for
 * the interrupts selected by the low 32 bits of @mask.
 */
481 int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
483 hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
484 hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
486 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_irq_read() - read pending interrupt status into *mask
 * (only the low 32 bits are populated on B0).
 */
489 int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
491 *mask = hw_atl_itr_irq_statuslsw_get(self);
492 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_ring_tx_stop() - disable the Tx descriptor ring @index. */
495 int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, int index)
497 hw_atl_tdm_tx_desc_en_set(self, 0U, index);
498 return aq_hw_err_from_flags(self);
/* hw_atl_b0_hw_ring_rx_stop() - disable the Rx descriptor ring @index. */
501 int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, int index)
503 hw_atl_rdm_rx_desc_en_set(self, 0U, index);
504 return aq_hw_err_from_flags(self);