-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in Mbps. The factor of 1.01 was
-* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
-* 2544 test. In this scenario the PF RL was reducing the line rate to 99%
-* although the credit increment value was the correct one and FW calculated
-* correct packet sizes. The reason for the inaccuracy of the RL is unknown at
-* this point.
-*/
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
+ */
#define QM_RL_INC_VAL(rate) \
OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
(8 * 100)), 1)
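/* Worked example (illustrative; assumes QM_RL_PERIOD is the RL period in
 * microseconds, consistent with the 25MHz clock scaling above): for a 25Gbps
 * port, rate = 25000 Mbps, so the increment is
 * max((25000 * QM_RL_PERIOD * 101) / (8 * 100), 1) - bits per period divided
 * by 8 to get bytes, scaled by 101/100 to recover the ~1% shortfall
 * described above.
 */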
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
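/* Field layout produced by PQ_INFO_ELEMENT (derived from the shifts above):
 * vp in bits [11:0], pf in [15:12], tc in [19:16], port in [21:20],
 * rl_valid in bit 22, rl in [31:24]; bit 23 is unused.
 */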
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
/* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
- u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
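	/* Illustrative note: a VF PQ with id pq_id roughly sets bit
	 * (pq_id % QM_PF_QUEUE_GROUP_SIZE) in word
	 * tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE]; the non-zero mask
	 * words are then stored to runtime registers once all PQs are mapped.
	 */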
num_pqs = num_pf_pqs + num_vf_pqs;
bool is_vf_pq, rl_valid;
u16 first_tx_pq_id;
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
- rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
- max_qm_global_rls;
+ rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - start_vport;
}
/* Check RL ID */
- if (pq_params[i].rl_valid && pq_params[i].vport_id >=
- max_qm_global_rls)
+ if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter config\n");
+ rl_valid = false;
+ }
/* Prepare PQ map entry */
struct qm_rf_pq_map_e4 tx_pq_map;
+
QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
1 : 0,
first_tx_pq_id, rl_valid ?
pq_params[i].vport_id : 0,
ext_voq, pq_params[i].wrr_group);
- /* Set base address */
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Clear PQ pointer table entry (64 bit) */
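+		/* (each 64-bit entry spans two consecutive 32-bit runtime
+		 * registers, hence the two STORE_RT_REG() writes below)
+		 */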
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
/* Write PQ info to RAM */
if (WRITE_PQ_INFO_TO_RAM != 0) {
u32 pq_info = 0;
+
pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
- pq_params[i].tc_id, port_id,
+ pq_params[i].tc_id,
+ pq_params[i].port_id,
rl_valid ? 1 : 0, rl_valid ?
pq_params[i].vport_id : 0);
ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 pf_id,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
- u16 i, pq_id, pq_group;
+ u16 i, j, pq_id, pq_group;
/* A single other PQ group is used in each PF, where PQ group i is used
* in PF i.
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
mem_addr_4kb += pq_mem_4kb;
}
}
* Return -1 on error.
*/
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
u16 pf_wfq,
u8 max_phys_tcs_per_port,
}
for (i = 0; i < num_tx_pqs; i++) {
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
max_phys_tcs_per_port);
crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
QM_REG_WFQPFCRD_RT_OFFSET :
(pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id,
- inc_val);
}
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
+ pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
- ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
+ num_tids, 0);
#endif
/* Map Tx PQs */
- ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
- max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
+ ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
+ is_pf_loading, num_pf_cids, num_vf_cids,
start_pq, num_pf_pqs, num_vf_pqs, start_vport,
other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
- if (ecore_pf_wfq_rt_init
- (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
- num_pf_pqs + num_vf_pqs, pq_params))
+ if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
+ max_phys_tcs_per_port,
+ num_pf_pqs + num_vf_pqs, pq_params))
return -1;
/* Init PF RL */
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
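/* Usage sketch (mirrors the calls below): toggle a single tunnel-type
 * enable bit in a PRS register image, e.g.:
 *
 *	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
 *		PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
 *		vxlan_enable);
 */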
-#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
+#define PRS_ETH_OUTPUT_FORMAT -46832
+
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+		/* Update output only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+		/* Update output only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+		/* Update output only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
ip_geneve_enable ? 1 : 0);
}
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable)
+{
+ u32 reg_val, cfg_mask;
+
+ /* read PRS config register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+
+ /* set VXLAN_NO_L2_ENABLE mask */
+ cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+
+ if (enable) {
+ /* set VXLAN_NO_L2_ENABLE flag */
+ reg_val |= cfg_mask;
+
+ /* update PRS FIC register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ } else {
+ /* clear VXLAN_NO_L2_ENABLE flag */
+ reg_val &= ~cfg_mask;
+ }
+
+ /* write PRS config register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
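+
+/* Usage sketch (hypothetical caller; assumes a PTT window acquired via
+ * ecore_ptt_acquire() and released afterwards):
+ *
+ *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ *
+ *	if (p_ptt) {
+ *		ecore_set_vxlan_no_l2_enable(p_hwfn, p_ptt, true);
+ *		ecore_ptt_release(p_hwfn, p_ptt);
+ *	}
+ */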
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
bool ipv6,
enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
ram_line_lo = 0;
ram_line_hi = 0;
+	/* By default, do not classify non-IP packets as GFT */
+ search_non_ip_as_gft = 0;
+
+ /* Tunnel type */
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
- } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+
+ /* Allow tunneled traffic without inner IP */
+ search_non_ip_as_gft = 1;
}
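+	/* Summary of the profile-type match fields set above: 4_TUPLE
+	 * matches on src/dst IP, over-IP protocol, ethertype and the L4
+	 * destination port; IP_DST_ADDR and IP_SRC_ADDR match one IP
+	 * address plus ethertype; TUNNEL_TYPE matches the tunnel ethertype
+	 * and also classifies tunneled frames with no inner IP header.
+	 */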
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
+ search_non_ip_as_gft);
ecore_wr(p_hwfn, p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
ram_line_lo);
ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
+
+
+/*******************************************************************************
+ * File name : rdma_init.c
+ * Author : Michael Shteinbok
+ *******************************************************************************
+ *******************************************************************************
+ * Description:
+ * RDMA HSI functions
+ *
+ *******************************************************************************
+ * Notes: This is the input to the auto-generated file drv_init_fw_funcs.c
+ *
+ *******************************************************************************
+ */
+static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
+ u8 storm_id)
+{
+ switch (storm_id) {
+ case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+
+ default: return 0;
+ }
+}
+
+void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS])
+{
+	u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);
+
+ ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
+ }
+}
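+
+/* Usage sketch (hypothetical caller; "level" stands for whatever assert
+ * level the caller chooses):
+ *
+ *	u8 levels[NUM_STORMS];
+ *
+ *	OSAL_MEMSET(levels, level, sizeof(levels));
+ *	ecore_set_rdma_error_level(p_hwfn, p_ptt, levels);
+ */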