#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
-#define ECORE_MINOR_VERSION 37
-#define ECORE_REVISION_VERSION 20
+#define ECORE_MINOR_VERSION 40
+#define ECORE_REVISION_VERSION 18
#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
bool configured;
};
+#define OFLD_GRP_SIZE 4
+
struct ecore_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
u32 init_ops_size;
};
u8 num_funcs_on_engine;
u8 enabled_func_idx;
+ u8 num_funcs_on_port;
/* BAR access */
void OSAL_IOMEM *regview;
#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
-#define ECORE_IS_E4(dev) (ECORE_IS_BB(dev) || ECORE_IS_AH(dev))
u16 vendor_id;
u16 device_id;
#ifndef ASIC_ONLY
bool b_is_emul_full;
+ bool b_is_emul_mac;
#endif
/* LLH info */
u8 ppfid_bitmap;
u8 engine_for_debug;
};
-#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
- : MAX_NUM_VFS_K2)
-#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
- : MAX_NUM_L2_QUEUES_K2)
-#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
- : MAX_NUM_PORTS_K2)
-#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
- : MAX_SB_PER_PATH_K2)
-#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
- : MAX_NUM_PFS_K2)
+enum ecore_hsi_def_type {
+ ECORE_HSI_DEF_MAX_NUM_VFS,
+ ECORE_HSI_DEF_MAX_NUM_L2_QUEUES,
+ ECORE_HSI_DEF_MAX_NUM_PORTS,
+ ECORE_HSI_DEF_MAX_SB_PER_PATH,
+ ECORE_HSI_DEF_MAX_NUM_PFS,
+ ECORE_HSI_DEF_MAX_NUM_VPORTS,
+ ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,
+ ECORE_HSI_DEF_MAX_QM_TX_QUEUES,
+ ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,
+ ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+ ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,
+ ECORE_HSI_DEF_MAX_PBF_CMD_LINES,
+ ECORE_HSI_DEF_MAX_BTB_BLOCKS,
+ ECORE_NUM_HSI_DEFS
+};
+
+u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev,
+ enum ecore_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+ ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_BTB_BLOCKS)
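+
+/* Illustrative usage (hypothetical caller, not part of this patch): the
+ * chip-specific limits are now resolved at run time through
+ * ecore_get_hsi_def_val() instead of at compile time, e.g.
+ *
+ *	if (rel_vf_id >= NUM_OF_VFS(p_hwfn->p_dev))
+ *		return ECORE_INVAL;
+ */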
#define CRC8_TABLE_SIZE 256
}
#define PKT_LB_TC 9
-#define MAX_NUM_VOQS_E4 20
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid);
enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+
+#define MSTORM_QZONE_START(dev) \
+ (TSTORM_QZONE_START + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
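+
+/* Queue-zone layout implied by the macros above: the TSTORM queue zones start
+ * at PXP_VF_BAR0_START_SDM_ZONE_A, and the MSTORM queue zones begin right
+ * after the TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev) bytes they occupy.
+ */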
+
#endif /* __ECORE_H */
/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256
+/* GFS constants */
+#define RGFS_MIN_NUM_ELEMS 256
+#define TGFS_MIN_NUM_ELEMS 256
+
/* Timers constants */
#define TM_SHIFT 7
#define TM_ALIGN (1 << TM_SHIFT)
#define CDUT_SEG_BLK(n) (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_MAX
-};
-
struct ilt_cfg_pair {
u32 reg;
u32 val;
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
+ u32 dynamic_line_offset;
u32 dynamic_line_cnt;
};
u32 vf_total_lines;
};
-/* Per Path -
- * ILT shadow table
- * Protocol acquired CID lists
- * PF start line in ILT
- */
-struct ecore_dma_mem {
- dma_addr_t p_phys;
- void *p_virt;
- osal_size_t size;
-};
-
#define MAP_WORD_SIZE sizeof(unsigned long)
#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
unsigned long *cid_map;
};
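+/* Presumed semantics of the acquired-CID map above: cid_map is a bitmap with
+ * one bit per CID of the connection type, so a relative CID lands in word
+ * (cid / BITS_PER_MAP_WORD), bit (cid % BITS_PER_MAP_WORD) of that word.
+ */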
+struct ecore_src_t2 {
+ struct phys_mem_desc *dma_mem;
+ u32 num_pages;
+ u64 first_free;
+ u64 last_free;
+};
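+/* first_free/last_free hold the physical addresses of the head and tail of
+ * the searcher T2 free list; they are later programmed via the
+ * SRC_REG_FIRSTFREE/SRC_REG_LASTFREE RT offsets (see the STORE_RT_REG_AGG
+ * calls further down in this patch).
+ */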
+
struct ecore_cxt_mngr {
/* Per protocol configuration */
struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];
/* ILT shadow table */
- struct ecore_dma_mem *ilt_shadow;
+ struct phys_mem_desc *ilt_shadow;
u32 pf_start_line;
/* Mutex for a dynamic ILT allocation */
osal_mutex_t mutex;
/* SRC T2 */
- struct ecore_dma_mem *t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
+ struct ecore_src_t2 src_t2;
/* The infrastructure originally was very generic and context/task
* oriented - per connection-type we would set how many of those
u32 per_vf_tids;
};
-static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_mngr *p_mngr,
struct ecore_tm_iids *iids)
{
+ struct ecore_conn_type_cfg *p_cfg;
bool tm_vf_required = false;
bool tm_required = false;
u32 i, j;
for (i = 0; i < MAX_CONN_TYPES; i++) {
- struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+ p_cfg = &p_mngr->conn_cfg[i];
if (tm_cid_proto(i) || tm_required) {
if (p_cfg->cid_count)
p_blk->start_line);
}
-static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
- enum ilt_clients ilt_client)
+static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
+ enum ilt_clients ilt_client,
+ u32 *dynamic_line_offset,
+ u32 *dynamic_line_cnt)
{
- u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
struct ecore_ilt_client_cfg *p_cli;
- u32 lines_to_skip = 0;
+ struct ecore_conn_type_cfg *p_cfg;
u32 cxts_per_p;
/* TBD MK: ILT code should be simplified once PROTO enum is changed */
+ *dynamic_line_offset = 0;
+ *dynamic_line_cnt = 0;
+
if (ilt_client == ILT_CLI_CDUC) {
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE];
cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
(u32)CONN_CXT_SIZE(p_hwfn);
- lines_to_skip = cid_count / cxts_per_p;
+ *dynamic_line_cnt = p_cfg->cid_count / cxts_per_p;
+ }
+}
+
+static struct ecore_ilt_client_cfg *
+ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli)
+{
+ p_cli->active = false;
+ p_cli->first.val = 0;
+ p_cli->last.val = 0;
+ return p_cli;
+}
+
+static struct ecore_ilt_cli_blk *
+ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)
+{
+ p_blk->total_size = 0;
+ return p_blk;
}
- return lines_to_skip;
+static u32
+ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
+{
+ struct ecore_src_iids src_iids;
+ u32 elem_num = 0;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+
+ /* Both the PF and VFs searcher connections are stored in the per PF
+ * database. Thus sum the PF searcher cids and all the VFs searcher
+ * cids.
+ */
+ elem_num = src_iids.pf_cids +
+ src_iids.per_vf_cids * p_mngr->vf_count;
+ if (elem_num == 0)
+ return elem_num;
+
+ elem_num = OSAL_MAX_T(u32, elem_num, SRC_MIN_NUM_ELEMS);
+ elem_num = OSAL_ROUNDUP_POW_OF_TWO(elem_num);
+
+ return elem_num;
}
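+/* Illustrative arithmetic (hypothetical numbers): with 4096 PF searcher CIDs,
+ * 16 VFs and 128 searcher CIDs per VF, elem_num = 4096 + 16 * 128 = 6144; it
+ * is clamped to at least SRC_MIN_NUM_ELEMS (256) and then rounded up to the
+ * next power of two, giving 8192 T2 elements.
+ */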
enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
{
+ u32 curr_line, total, i, task_size, line, total_size, elem_size;
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 curr_line, total, i, task_size, line;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_blk;
struct ecore_cdu_iids cdu_iids;
- struct ecore_src_iids src_iids;
struct ecore_qm_iids qm_iids;
struct ecore_tm_iids tm_iids;
struct ecore_tid_seg *p_seg;
OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
- OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
- p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
curr_line = p_mngr->pf_start_line;
/* get the counters for the CDUC and QM clients */
ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
- p_blk = &p_cli->pf_blks[CDUC_BLK];
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
- p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
- ILT_CLI_CDUC);
+ ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
+ &p_blk->dynamic_line_offset,
+ &p_blk->dynamic_line_cnt);
/* CDUC VF */
- p_blk = &p_cli->vf_blks[CDUC_BLK];
+ p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
ILT_CLI_CDUC);
/* CDUT PF */
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
p_cli->first.val = curr_line;
/* first the 'working' task memory */
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
p_mngr->task_type_size[p_seg->type]);
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ p_blk =
+ ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
if (!p_seg->has_fl_mem) {
/* The segment is active (total size of 'working'
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
}
- p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+ p_cli->pf_total_lines = curr_line - p_cli->first.val;
/* CDUT VF */
p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
/* 'working' memory */
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
- p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
ecore_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total,
p_mngr->task_type_size[p_seg->type]);
ILT_CLI_CDUT);
/* 'init' memory */
- p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ p_blk =
+ ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
if (!p_seg->has_fl_mem) {
/* see comment above */
line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
}
- p_cli->vf_total_lines = curr_line -
- p_cli->vf_blks[0].start_line;
+ p_cli->vf_total_lines = curr_line - (p_cli->first.val +
+ p_cli->pf_total_lines);
/* Now for the rest of the VFs */
for (i = 1; i < p_mngr->vf_count; i++) {
+ /* don't set p_blk i.e. don't clear total_size */
p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
+ /* don't set p_blk i.e. don't clear total_size */
p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUT);
}
/* QM */
- p_cli = &p_mngr->clients[ILT_CLI_QM];
- p_blk = &p_cli->pf_blks[0];
-
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
+
+ /* At this stage, after the first QM configuration, the PF PQs amount
+ * is the highest possible. Save this value at qm_info->ilt_pf_pqs to
+ * detect overflows in the future.
+ * Even though VF PQs amount can be larger than VF count, use vf_count
+ * because each VF requires only the full amount of CIDs.
+ */
ecore_cxt_qm_iids(p_hwfn, &qm_iids);
- total = ecore_qm_pf_mem_size(qm_iids.cids,
+ total = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
- p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE,
p_hwfn->qm_info.num_vf_pqs);
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
- /* SRC */
- p_cli = &p_mngr->clients[ILT_CLI_SRC];
- ecore_cxt_src_iids(p_mngr, &src_iids);
-
- /* Both the PF and VFs searcher connections are stored in the per PF
- * database. Thus sum the PF searcher cids and all the VFs searcher
- * cids.
- */
- total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
- if (total) {
- u32 local_max = OSAL_MAX_T(u32, total,
- SRC_MIN_NUM_ELEMS);
-
- total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
-
- p_blk = &p_cli->pf_blks[0];
- ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
- total * sizeof(struct src_ent),
- sizeof(struct src_ent));
-
- ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
- ILT_CLI_SRC);
- p_cli->pf_total_lines = curr_line - p_blk->start_line;
- }
-
/* TM PF */
- p_cli = &p_mngr->clients[ILT_CLI_TM];
- ecore_cxt_tm_iids(p_mngr, &tm_iids);
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+ ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
- p_blk = &p_cli->pf_blks[0];
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
- total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+ total * TM_ELEM_SIZE,
+ TM_ELEM_SIZE);
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
/* TM VF */
total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
if (total) {
- p_blk = &p_cli->vf_blks[0];
+ p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
}
}
+ /* SRC */
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
+ total = ecore_cxt_src_elements(p_mngr);
+
+ if (total) {
+ total_size = total * sizeof(struct src_ent);
+ elem_size = sizeof(struct src_ent);
+
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total_size, elem_size);
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
/* TSDM (SRQ CONTEXT) */
total = ecore_cxt_get_srq_count(p_hwfn);
if (total) {
- p_cli = &p_mngr->clients[ILT_CLI_TSDM];
- p_blk = &p_cli->pf_blks[SRQ_BLK];
+ p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+ p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
u32 i;
- if (!p_mngr->t2)
+ if (!p_t2 || !p_t2->dma_mem)
return;
- for (i = 0; i < p_mngr->t2_num_pages; i++)
- if (p_mngr->t2[i].p_virt)
+ for (i = 0; i < p_t2->num_pages; i++)
+ if (p_t2->dma_mem[i].virt_addr)
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_mngr->t2[i].p_virt,
- p_mngr->t2[i].p_phys,
- p_mngr->t2[i].size);
+ p_t2->dma_mem[i].virt_addr,
+ p_t2->dma_mem[i].phys_addr,
+ p_t2->dma_mem[i].size);
- OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
+ OSAL_FREE(p_hwfn->p_dev, p_t2->dma_mem);
+ p_t2->dma_mem = OSAL_NULL;
+}
+
+static enum _ecore_status_t
+ecore_cxt_t2_alloc_pages(struct ecore_hwfn *p_hwfn,
+ struct ecore_src_t2 *p_t2,
+ u32 total_size, u32 page_size)
+{
+ void **p_virt;
+ u32 size, i;
+
+ if (!p_t2 || !p_t2->dma_mem)
+ return ECORE_INVAL;
+
+ for (i = 0; i < p_t2->num_pages; i++) {
+ size = OSAL_MIN_T(u32, total_size, page_size);
+ p_virt = &p_t2->dma_mem[i].virt_addr;
+
+ *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_t2->dma_mem[i].phys_addr,
+ size);
+ if (!p_t2->dma_mem[i].virt_addr)
+ return ECORE_NOMEM;
+
+ OSAL_MEM_ZERO(*p_virt, size);
+ p_t2->dma_mem[i].size = size;
+ total_size -= size;
+ }
+
+ return ECORE_SUCCESS;
}
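+/* Each T2 page above is sized OSAL_MIN_T(remaining total_size, page_size), so
+ * every page except possibly the last spans a full page_size; the caller
+ * passes the SRC ILT client's page size (psz) as page_size.
+ */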
static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_num, total_size, ent_per_page, psz, i;
+ struct phys_mem_desc *p_t2_last_page;
struct ecore_ilt_client_cfg *p_src;
struct ecore_src_iids src_iids;
- struct ecore_dma_mem *p_t2;
+ struct ecore_src_t2 *p_t2;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
/* use the same page size as the SRC ILT client */
psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
- p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+ p_t2 = &p_mngr->src_t2;
+ p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- p_mngr->t2_num_pages *
- sizeof(struct ecore_dma_mem));
- if (!p_mngr->t2) {
+ p_t2->dma_mem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ p_t2->num_pages *
+ sizeof(struct phys_mem_desc));
+ if (!p_t2->dma_mem) {
DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
rc = ECORE_NOMEM;
goto t2_fail;
}
- /* allocate t2 pages */
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
- u32 size = OSAL_MIN_T(u32, total_size, psz);
- void **p_virt = &p_mngr->t2[i].p_virt;
-
- *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
- &p_mngr->t2[i].p_phys, size);
- if (!p_mngr->t2[i].p_virt) {
- rc = ECORE_NOMEM;
- goto t2_fail;
- }
- OSAL_MEM_ZERO(*p_virt, size);
- p_mngr->t2[i].size = size;
- total_size -= size;
- }
+ rc = ecore_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+ if (rc)
+ goto t2_fail;
/* Set the t2 pointers */
/* entries per page - must be a power of two */
ent_per_page = psz / sizeof(struct src_ent);
- p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
+ p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
- p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
- p_mngr->last_free = (u64)p_t2->p_phys +
- ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+ p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+ p_t2->last_free = (u64)p_t2_last_page->phys_addr +
+ ((conn_num - 1) & (ent_per_page - 1)) *
+ sizeof(struct src_ent);
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ for (i = 0; i < p_t2->num_pages; i++) {
u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
- struct src_ent *entries = p_mngr->t2[i].p_virt;
- u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
+ struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+ u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
u32 j;
for (j = 0; j < ent_num - 1; j++) {
entries[j].next = OSAL_CPU_TO_BE64(val);
}
- if (i < p_mngr->t2_num_pages - 1)
- val = (u64)p_mngr->t2[i + 1].p_phys;
+ if (i < p_t2->num_pages - 1)
+ val = (u64)p_t2->dma_mem[i + 1].phys_addr;
else
val = 0;
entries[j].next = OSAL_CPU_TO_BE64(val);
ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
- struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+ struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
- if (p_dma->p_virt)
+ if (p_dma->virt_addr)
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_dma->p_virt,
+ p_dma->virt_addr,
- p_dma->p_phys, p_dma->size);
- p_dma->p_virt = OSAL_NULL;
+ p_dma->phys_addr, p_dma->size);
+ p_dma->virt_addr = OSAL_NULL;
}
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
p_mngr->ilt_shadow = OSAL_NULL;
struct ecore_ilt_cli_blk *p_blk,
enum ilt_clients ilt_client, u32 start_line_offset)
{
- struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
- u32 lines, line, sz_left, lines_to_skip = 0;
+ struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left, lines_to_skip, first_skipped_line;
/* Special handling for RoCE that supports dynamic allocation */
if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
return ECORE_SUCCESS;
- lines_to_skip = p_blk->dynamic_line_cnt;
-
if (!p_blk->total_size)
return ECORE_SUCCESS;
sz_left = p_blk->total_size;
+ lines_to_skip = p_blk->dynamic_line_cnt;
lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
line = p_blk->start_line + start_line_offset -
- p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ first_skipped_line = line + p_blk->dynamic_line_offset;
- for (; lines; lines--) {
+ while (lines) {
dma_addr_t p_phys;
void *p_virt;
u32 size;
+ if (lines_to_skip && (line == first_skipped_line)) {
+ line += lines_to_skip;
+ continue;
+ }
+
size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
/* @DPDK */
return ECORE_NOMEM;
OSAL_MEM_ZERO(p_virt, size);
- ilt_shadow[line].p_phys = p_phys;
- ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].phys_addr = p_phys;
+ ilt_shadow[line].virt_addr = p_virt;
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
sz_left -= size;
line++;
+ lines--;
}
return ECORE_SUCCESS;
size = ecore_cxt_ilt_shadow_size(clients);
p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- size * sizeof(struct ecore_dma_mem));
+ size * sizeof(struct phys_mem_desc));
if (!p_mngr->ilt_shadow) {
DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
"Allocated 0x%x bytes for ilt shadow\n",
- (u32)(size * sizeof(struct ecore_dma_mem)));
+ (u32)(size * sizeof(struct phys_mem_desc)));
for_each_ilt_valid_client(i, clients) {
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
}
static enum _ecore_status_t
-ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+__ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
u32 cid_start, u32 cid_count,
struct ecore_cid_acquired_map *p_map)
{
return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid,
+ u32 vf_start_cid)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
- u32 start_cid = 0, vf_start_cid = 0;
- u32 type, vf;
+ u32 vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
+ struct ecore_cid_acquired_map *p_map;
+ struct ecore_conn_type_cfg *p_cfg;
+ enum _ecore_status_t rc;
- for (type = 0; type < MAX_CONN_TYPES; type++) {
- struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
- struct ecore_cid_acquired_map *p_map;
+ p_cfg = &p_mngr->conn_cfg[type];
/* Handle PF maps */
p_map = &p_mngr->acquired[type];
- if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
- p_cfg->cid_count, p_map))
- goto cid_map_fail;
+ rc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Handle VF maps */
+ for (vf = 0; vf < max_num_vfs; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ rc = __ecore_cid_map_alloc_single(p_hwfn, type, vf_start_cid,
+ p_cfg->cids_per_vf, p_map);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
- /* Handle VF maps */
- for (vf = 0; vf < max_num_vfs; vf++) {
- p_map = &p_mngr->acquired_vf[type][vf];
- if (ecore_cid_map_alloc_single(p_hwfn, type,
- vf_start_cid,
- p_cfg->cids_per_vf,
- p_map))
- goto cid_map_fail;
- }
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type;
+ enum _ecore_status_t rc;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ rc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ vf_start_cid);
+ if (rc != ECORE_SUCCESS)
+ goto cid_map_fail;
- start_cid += p_cfg->cid_count;
- vf_start_cid += p_cfg->cids_per_vf;
+ start_cid += p_mngr->conn_cfg[type].cid_count;
+ vf_start_cid += p_mngr->conn_cfg[type].cids_per_vf;
}
return ECORE_SUCCESS;
cid_map_fail:
ecore_cid_map_free(p_hwfn);
- return ECORE_NOMEM;
+ return rc;
}
enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_cid_acquired_map *acquired_vf;
struct ecore_ilt_client_cfg *clients;
struct ecore_cxt_mngr *p_mngr;
- u32 i;
+ u32 i, max_num_vfs;
p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
if (!p_mngr) {
return ECORE_NOMEM;
}
- /* Set the cxt mangr pointer prior to further allocations */
- p_hwfn->p_cxt_mngr = p_mngr;
-
/* Initialize ILT client registers */
clients = p_mngr->clients;
clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
+ /* Set the cxt mangr pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ acquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ max_num_vfs, sizeof(*acquired_vf));
+ if (!acquired_vf) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate an array of `struct ecore_cid_acquired_map'\n");
+ return ECORE_NOMEM;
+ }
+
+ p_mngr->acquired_vf[i] = acquired_vf;
+ }
+
return ECORE_SUCCESS;
}
void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
{
+ u32 i;
+
if (!p_hwfn->p_cxt_mngr)
return;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
#endif
+ for (i = 0; i < MAX_CONN_TYPES; i++)
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr->acquired_vf[i]);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = OSAL_NULL;
}
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
bool is_pf_loading)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- struct ecore_mcp_link_state *p_link;
struct ecore_qm_iids iids;
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
-
- p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
-
ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
qm_info->max_phys_tcs_per_port,
is_pf_loading,
qm_info->num_vf_pqs,
qm_info->start_vport,
qm_info->num_vports, qm_info->pf_wfq,
- qm_info->pf_rl, p_link->speed,
+ qm_info->pf_rl,
p_hwfn->qm_info.qm_pq_params,
p_hwfn->qm_info.qm_vport_params);
}
{
struct ecore_ilt_client_cfg *clients;
struct ecore_cxt_mngr *p_mngr;
- struct ecore_dma_mem *p_shdw;
+ struct phys_mem_desc *p_shdw;
u32 line, rt_offst, i;
ecore_ilt_bounds_init(p_hwfn);
/** p_virt could be OSAL_NULL in case of dynamic
* allocation
*/
- if (p_shdw[line].p_virt != OSAL_NULL) {
+ if (p_shdw[line].virt_addr != OSAL_NULL) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
- (p_shdw[line].p_phys >> 12));
+ (p_shdw[line].phys_addr >> 12));
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
"Setting RT[0x%08x] from"
" Physical addr: 0x%lx\n",
rt_offst, line, i,
(unsigned long)(p_shdw[line].
- p_phys >> 12));
+ phys_addr >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
OSAL_LOG2(rounded_conn_num));
STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->first_free);
+ p_hwfn->p_cxt_mngr->src_t2.first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
- p_hwfn->p_cxt_mngr->last_free);
+ p_hwfn->p_cxt_mngr->src_t2.last_free);
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
"Configured SEARCHER for 0x%08x connections\n",
conn_num);
u8 i;
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
- ecore_cxt_tm_iids(p_mngr, &tm_iids);
+ ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
/* @@@TBD No pre-scan for now */
- /* Note: We assume consecutive VFs for a PF */
- for (i = 0; i < p_mngr->vf_count; i++) {
cfg_word = 0;
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
- SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
(sizeof(cfg_word) / sizeof(u32)) *
(p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
(NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
- /* enale scan */
+ /* enable scan */
STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
tm_iids.pf_cids ? 0x1 : 0x0);
line = p_info->iid / cxts_per_p;
/* Make sure context is allocated (dynamic allocation) */
- if (!p_mngr->ilt_shadow[line].p_virt)
+ if (!p_mngr->ilt_shadow[line].virt_addr)
return ECORE_INVAL;
- p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
+ p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr +
p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
- if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
goto out0;
p_ptt = ecore_ptt_acquire(p_hwfn);
}
OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
p_blk->real_size_in_page;
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry,
ILT_ENTRY_PHY_ADDR,
- (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12));
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
OSAL_NULL /* default parameters */);
- if (elem_type == ECORE_ELEM_CXT) {
- u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
- elems_per_p;
-
- /* Update the relevant register in the parser */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
- last_cid_allocated - 1);
-
- if (!p_hwfn->b_rdma_enabled_in_prs) {
- /* Enable RoCE search */
- ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
- p_hwfn->b_rdma_enabled_in_prs = true;
- }
- }
-
out1:
ecore_ptt_release(p_hwfn, p_ptt);
out0:
}
for (i = shadow_start_line; i < shadow_end_line; i++) {
- if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
continue;
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
/* compute absolute offset */
ECORE_ELEM_TASK
};
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ ILT_CLI_MAX
+};
+
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 *vf_cid);
continue;
/* if no app tlv was present, don't override in FW */
- ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,
- priority, tc, type);
+ ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
+ p_data->arr[DCBX_PROTOCOL_ETH].enable,
+ priority, tc, type);
}
return ECORE_SUCCESS;
static osal_spinlock_t qm_lock;
static u32 qm_lock_ref_cnt;
+#ifndef ASIC_ONLY
+static bool b_ptt_gtt_init;
+#endif
+
/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
* doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
/* Filter enable - should be done first when removing a filter */
if (b_write_access && !p_details->enable) {
- addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
p_details->enable);
}
/* Filter value */
- addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
OSAL_MEMSET(&params, 0, sizeof(params));
if (b_write_access) {
return rc;
/* Filter mode */
- addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
if (b_write_access)
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);
else
addr);
/* Filter protocol type */
- addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
if (b_write_access)
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
p_details->protocol_type);
/* Filter enable - should be done last when adding a filter */
if (!b_write_access || p_details->enable) {
- addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
if (b_write_access)
ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
p_details->enable);
}
static enum _ecore_status_t
-ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
u32 high, u32 low)
{
}
static enum _ecore_status_t
-ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
+ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
{
struct ecore_llh_filter_details filter_details;
true /* write access */);
}
-static enum _ecore_status_t
-ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
- u32 low)
-{
- return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
- filter_idx, filter_prot_type,
- high, low);
-}
-
-static enum _ecore_status_t
-ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u8 abs_ppfid, u8 filter_idx)
-{
- return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
- filter_idx);
-}
-
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
u8 mac_addr[ETH_ALEN])
{
for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
filter_idx++) {
- rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,
+ rc = ecore_llh_remove_filter(p_hwfn, p_ptt,
abs_ppfid, filter_idx);
if (rc != ECORE_SUCCESS)
goto out;
return ECORE_SUCCESS;
}
-static enum _ecore_status_t
-ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u8 ppfid)
+enum _ecore_status_t
+ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
struct ecore_llh_filter_details filter_details;
u8 abs_ppfid, filter_idx;
u32 addr;
enum _ecore_status_t rc;
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
if (rc != ECORE_SUCCESS)
- return rc;
+ goto out;
addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
DP_NOTICE(p_hwfn, false,
filter_idx, &filter_details,
false /* read access */);
if (rc != ECORE_SUCCESS)
- return rc;
+ goto out;
DP_NOTICE(p_hwfn, false,
"filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
filter_details.protocol_type, filter_details.hdr_sel);
}
- return ECORE_SUCCESS;
-}
-
-enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
-{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
- enum _ecore_status_t rc;
-
- if (p_ptt == OSAL_NULL)
- return ECORE_AGAIN;
-
- rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
+out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
{
/* Initialize qm port parameters */
u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
for (i = 0; i < num_ports; i++) {
struct init_qm_port_params *p_qm_port =
&p_hwfn->qm_info.qm_port_params[i];
+ u16 pbf_max_cmd_lines;
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
- p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(p_dev);
+ p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
+ p_qm_port->num_btb_blocks =
+ NUM_OF_BTB_BLOCKS(p_dev) / num_ports;
}
}
(pq_init_flags & PQ_INIT_PF_RL ||
pq_init_flags & PQ_INIT_VF_RL);
+ /* The "rl_id" is set as the "vport_id" */
+ qm_info->qm_pq_params[pq_idx].rl_id =
+ qm_info->qm_pq_params[pq_idx].vport_id;
+
/* qm params accounting */
qm_info->num_pqs++;
if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
/* pq table */
for (i = 0; i < qm_info->num_pqs; i++) {
pq = &qm_info->qm_pq_params[i];
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d, rl_id %d\n",
qm_info->start_pq + i, pq->port_id, pq->vport_id,
- pq->tc_id, pq->wrr_group, pq->rl_valid);
+ pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
}
}
"Failed to allocate dbg user info structure\n");
goto alloc_err;
}
+
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate dbg user info structure\n");
+ goto alloc_err;
+ }
} /* hwfn loop */
rc = ecore_llh_alloc(p_dev);
{
int hw_mode = 0;
- if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+ if (ECORE_IS_BB(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_BB;
} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_K2;
}
#ifndef ASIC_ONLY
-/* MFW-replacement initializations for non-ASIC */
-static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+/* MFW-replacement initializations for emulation */
+static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev,
struct ecore_ptt *p_ptt)
{
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- u32 pl_hv = 1;
- int i;
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 pl_hv, wr_mbs;
+ int i, pos;
+ u16 ctrl = 0;
- if (CHIP_REV_IS_EMUL(p_dev)) {
- if (ECORE_IS_AH(p_dev))
- pl_hv |= 0x600;
+ if (!CHIP_REV_IS_EMUL(p_dev)) {
+ DP_NOTICE(p_dev, false,
+ "ecore_hw_init_chip() shouldn't be called in a non-emulation environment\n");
+ return ECORE_INVAL;
}
+ pl_hv = ECORE_IS_BB(p_dev) ? 0x1 : 0x401;
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
if (ECORE_IS_AH(p_dev))
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2, 0x3ffffff);
- /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
- /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
- if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
+ /* Initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
+ if (ECORE_IS_BB(p_dev))
ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
- if (CHIP_REV_IS_EMUL(p_dev)) {
- if (ECORE_IS_AH(p_dev)) {
- /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_dev->num_ports_in_engine >> 1));
+ if (ECORE_IS_AH(p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ p_dev->num_ports_in_engine >> 1);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_dev->num_ports_in_engine == 4 ? 0 : 3);
- }
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_dev->num_ports_in_engine == 4 ? 0 : 3);
}
- /* Poll on RBC */
+ /* Signal the PSWRQ block to start initializing internal memories */
ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
for (i = 0; i < 100; i++) {
OSAL_UDELAY(50);
if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
break;
}
- if (i == 100)
+ if (i == 100) {
DP_NOTICE(p_hwfn, true,
"RBC done failed to complete in PSWRQ2\n");
+ return ECORE_TIMEOUT;
+ }
+
+ /* Indicate PSWRQ to initialize steering tag table with zeros */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT, 1);
+ for (i = 0; i < 100; i++) {
+ OSAL_UDELAY(50);
+ if (!ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT))
+ break;
+ }
+ if (i == 100) {
+ DP_NOTICE(p_hwfn, true,
+ "Steering tag table initialization failed to complete in PSWRQ2\n");
+ return ECORE_TIMEOUT;
+ }
+
+ /* Clear a possible PSWRQ2 STT parity which might have been generated by
+ * a previous MSI-X read.
+ */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_PRTY_STS_WR_H_0, 0x8);
+
+ /* Configure PSWRQ2_REG_WR_MBS0 according to the MaxPayloadSize field in
+ * the PCI configuration space. The value is common for all PFs, so it
+ * is okay to do it according to the first loading PF.
+ */
+ pos = OSAL_PCI_FIND_CAPABILITY(p_dev, PCI_CAP_ID_EXP);
+ if (!pos) {
+ DP_NOTICE(p_dev, true,
+ "Failed to find the PCI Express Capability structure in the PCI config space\n");
+ return ECORE_IO;
+ }
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
+ wr_mbs = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0, wr_mbs);
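+
+ /* For reference: the 3-bit MaxPayloadSize encoding read above maps
+ * 0..5 -> 128B..4096B (PCIe spec), and that raw encoding is what gets
+ * written to PSWRQ2_REG_WR_MBS0, e.g. a 256B MPS yields wr_mbs = 1.
+ */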
+
+ /* Configure the PGLUE_B to discard mode */
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_DISCARD_NBLOCK, 0x3f);
return ECORE_SUCCESS;
}
static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
{
u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
- int i, igu_sb_id;
+ u32 igu_sb_id;
+ int i;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
ecore_gtt_init(p_hwfn);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_dev)) {
- rc = ecore_hw_init_chip(p_hwfn, p_ptt);
+ if (CHIP_REV_IS_EMUL(p_dev) && IS_LEAD_HWFN(p_hwfn)) {
+ rc = ecore_hw_init_chip(p_dev, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
- qm_info->qm_port_params);
+ qm_info->qm_port_params,
+ OSAL_NULL /* global RLs are not configured */);
ecore_cxt_hw_init_common(p_hwfn);
/* Workaround clears ROCE search for all functions to prevent
* involving non initialized function in processing ROCE packet.
*/
- num_pfs = NUM_OF_ENG_PFS(p_dev);
+ num_pfs = (u16)NUM_OF_ENG_PFS(p_dev);
for (pf_id = 0; pf_id < num_pfs; pf_id++) {
ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
* This is not done inside the init tool since it currently can't
* perform a pretending to VFs.
*/
- max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ max_num_vfs = (u8)NUM_OF_VFS(p_dev);
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
{
u8 loopback = 0, port = p_hwfn->port_id * 2;
- DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
-
/* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
port);
}
#endif
+static u32 ecore_hw_norm_region_conn(struct ecore_hwfn *p_hwfn)
+{
+ u32 norm_region_conn;
+
+ /* The order of CIDs allocation is according to the order of
+ * 'enum protocol_type'. Therefore, the number of CIDs for the normal
+ * region is calculated based on the CORE CIDs, in case of non-ETH
+ * personality, and otherwise - based on the ETH CIDs.
+ */
+ norm_region_conn =
+ ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ OSAL_NULL) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ OSAL_NULL);
+
+ return norm_region_conn;
+}
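+/* Rough sizing sketch (illustrative numbers, assuming ECORE_PF_DEMS_SIZE is
+ * 4 bytes and a 4K OSAL_PAGE_SIZE): 8192 normal-region connections give
+ * norm_regsize = ROUNDUP(4 * 8192, 4096) = 32K, i.e. min_addr_reg1 = 8, and
+ * the remainder of the doorbell BAR is left for the PWM region.
+ */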
+
static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 norm_region_conn, min_addr_reg1;
u32 pwm_regsize, norm_regsize;
- u32 non_pwm_conn, min_addr_reg1;
u32 db_bar_size, n_cpus;
u32 roce_edpm_mode;
u32 pf_dems_shift;
* connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
* in units of 4,096 bytes.
*/
- non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
- ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
- OSAL_NULL) +
- ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
- norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,
+ norm_region_conn = ecore_hw_norm_region_conn(p_hwfn);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * norm_region_conn,
OSAL_PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
pwm_regsize = db_bar_size - norm_regsize;
struct ecore_ptt *p_ptt,
int hw_mode)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* In CMT the gate should be cleared by the 2nd hwfn */
- if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
+ if (!ECORE_IS_CMT(p_dev) || !IS_LEAD_HWFN(p_hwfn))
STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
- return ECORE_SUCCESS;
+ if (CHIP_REV_IS_FPGA(p_dev) && ECORE_IS_BB(p_dev))
+ ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
- if (ECORE_IS_AH(p_hwfn->p_dev))
- return ECORE_SUCCESS;
- else if (ECORE_IS_BB(p_hwfn->p_dev))
- ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
- } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (ECORE_IS_CMT(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_CMT(p_dev)) {
/* Activate OPTE in CMT */
u32 val;
0x55555555);
}
+ /* Set the TAGMAC default function on the port if needed.
+ * The ppfid should be set in the vector, except in BB which has
+ * a bug in the LLH where the ppfid is actually engine based.
+ */
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_dev->mf_bits)) {
+ u8 pf_id = p_hwfn->rel_pf_id;
+
+ if (!ECORE_IS_BB(p_dev))
+ pf_id /= p_dev->num_ports_in_engine;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+ }
+
ecore_emul_link_init(p_hwfn, p_ptt);
- } else {
- DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
}
#endif
- return rc;
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t
goto load_err;
/* Clear the pglue_b was_error indication.
- * In E4 it must be done after the BME and the internal
- * FID_enable for the PF are set, since VDMs may cause the
- * indication to be set again.
+ * It must be done after the BME and the internal FID_enable for
+ * the PF are set, since VDMs may cause the indication to be set
+ * again.
*/
ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
return ECORE_SUCCESS;
}
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+
+static u32 ecore_hsi_def_val[][MAX_CHIP_IDS] = {
+ {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+ {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+ {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+ {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2, },
+ {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+ {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+ {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+ {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+ {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+ {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+ {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+ {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+ {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev, enum ecore_hsi_def_type type)
+{
+ enum chip_ids chip_id = ECORE_IS_BB(p_dev) ? CHIP_BB : CHIP_K2;
+
+ if (type >= ECORE_NUM_HSI_DEFS) {
+ DP_ERR(p_dev, "Unexpected HSI definition type [%d]\n", type);
+ return 0;
+ }
+
+ return ecore_hsi_def_val[type][chip_id];
+}
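+
+/* Note: the rows of ecore_hsi_def_val must be kept in the same order as
+ * enum ecore_hsi_def_type, and the columns are indexed by chip type
+ * (CHIP_BB / CHIP_K2), since the lookup is a plain [type][chip_id] access.
+ */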
+
static enum _ecore_status_t
ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
u32 resc_max_val, mcp_resp;
u8 res_id;
enum _ecore_status_t rc;
u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
- bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
switch (res_id) {
case ECORE_L2_QUEUE:
- *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
- MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_L2_QUEUES(p_dev) / num_funcs;
break;
case ECORE_VPORT:
- *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_VPORTS(p_dev) / num_funcs;
break;
case ECORE_RSS_ENG:
- *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
- ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RSS_ENGINES(p_dev) / num_funcs;
break;
case ECORE_PQ:
- *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
- MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_QM_TX_QUEUES(p_dev) / num_funcs;
+ *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case ECORE_RL:
- *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = NUM_OF_QM_GLOBAL_RLS(p_dev) / num_funcs;
break;
case ECORE_MAC:
case ECORE_VLAN:
*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case ECORE_ILT:
- *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
- PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_PXP_ILT_RECORDS(p_dev) / num_funcs;
break;
case ECORE_LL2_QUEUE:
- *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
break;
case ECORE_RDMA_CNQ_RAM:
case ECORE_CMDQS_CQS:
*p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
break;
case ECORE_RDMA_STATS_QUEUE:
- /* @DPDK */
- *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(p_dev) / num_funcs;
break;
case ECORE_BDQ:
/* @DPDK */
/* 4-ports mode has limitations that should be enforced:
* - BB: the MFW can access only PPFIDs which their corresponding PFIDs
* belong to this certain port.
- * - AH/E5: only 4 PPFIDs per port are available.
+ * - AH: only 4 PPFIDs per port are available.
*/
if (ecore_device_num_ports(p_dev) == 4) {
u8 mask;
{
struct ecore_resc_unlock_params resc_unlock_params;
struct ecore_resc_lock_params resc_lock_params;
- bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 max_ilt_lines;
u8 res_id;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
}
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
/* Reduced build contains less PQs */
- if (!(p_hwfn->p_dev->b_is_emul_full)) {
+ if (!(p_dev->b_is_emul_full)) {
resc_num[ECORE_PQ] = 32;
resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
p_hwfn->enabled_func_idx;
/* For AH emulation, since we have a possible maximal number of
* 16 enabled PFs, in case there are not enough ILT lines -
- * allocate only first PF as RoCE and have all the other ETH
- * only with less ILT lines.
+ * allocate only the first PF as RoCE and have all the others as
+ * ETH-only with fewer ILT lines.
+ * In case we increase the number of ILT lines for PF0, we also
+ * need to correct the start value for PF1-15.
*/
- if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
- resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
- resc_num[ECORE_ILT],
+ if (ECORE_IS_AH(p_dev) && p_dev->b_is_emul_full) {
+ if (!p_hwfn->rel_pf_id) {
+ resc_num[ECORE_ILT] =
+ OSAL_MAX_T(u32, resc_num[ECORE_ILT],
roce_min_ilt_lines);
+ } else if (resc_num[ECORE_ILT] < roce_min_ilt_lines) {
+ resc_start[ECORE_ILT] += roce_min_ilt_lines -
+ resc_num[ECORE_ILT];
+ }
+ }
}
-
- /* Correct the common ILT calculation if PF0 has more */
- if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
- p_hwfn->p_dev->b_is_emul_full &&
- p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines)
- resc_start[ECORE_ILT] += roce_min_ilt_lines -
- resc_num[ECORE_ILT];
#endif
/* Sanity for ILT */
- if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
- (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+ max_ilt_lines = NUM_OF_PXP_ILT_RECORDS(p_dev);
+ if (RESC_END(p_hwfn, ECORE_ILT) > max_ilt_lines) {
DP_NOTICE(p_hwfn, true,
"Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
return rc;
}
+#ifndef ASIC_ONLY
+static enum _ecore_status_t
+ecore_emul_hw_get_nvm_info(struct ecore_hwfn *p_hwfn)
+{
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ /* The MF mode on emulation is either default or NPAR 1.0 */
+ p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST;
+ if (p_hwfn->num_funcs_on_port > 1)
+ p_dev->mf_bits |= 1 << ECORE_MF_INTER_PF_SWITCH |
+ 1 << ECORE_MF_DISABLE_ARFS;
+ else
+ p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+ }
+
+ return ECORE_SUCCESS;
+}
+#endif
+
static enum _ecore_status_t
ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_params *link;
enum _ecore_status_t rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ return ecore_emul_hw_get_nvm_info(p_hwfn);
+#endif
+
/* Read global nvm_cfg address */
nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
-static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- u32 port_mode;
-
#ifndef ASIC_ONLY
- /* Read the port mode */
- if (CHIP_REV_IS_FPGA(p_dev))
- port_mode = 4;
- else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
- /* In CMT on emulation, assume 1 port */
- port_mode = 1;
- else
-#endif
- port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
-
- if (port_mode < 3) {
- p_dev->num_ports_in_engine = 1;
- } else if (port_mode <= 5) {
- p_dev->num_ports_in_engine = 2;
- } else {
- DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
- p_dev->num_ports_in_engine);
-
- /* Default num_ports_in_engine to something */
- p_dev->num_ports_in_engine = 1;
- }
-}
-
-static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
+static void ecore_emul_hw_info_port_num(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- u32 port;
- int i;
+ u32 eco_reserved;
- p_dev->num_ports_in_engine = 0;
+ /* MISCS_REG_ECO_RESERVED[15:12]: num of ports in an engine */
+ eco_reserved = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_dev)) {
- port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
- switch ((port & 0xf000) >> 12) {
+ switch ((eco_reserved & 0xf000) >> 12) {
case 1:
p_dev->num_ports_in_engine = 1;
break;
break;
default:
DP_NOTICE(p_hwfn, false,
- "Unknown port mode in ECO_RESERVED %08x\n",
- port);
- }
- } else
-#endif
- for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
- port = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NIG_PORT0_CONF_K2 +
- (i * 4));
- if (port & 1)
- p_dev->num_ports_in_engine++;
+ "Emulation: Unknown port mode [ECO_RESERVED 0x%08x]\n",
+ eco_reserved);
+ p_dev->num_ports_in_engine = 2; /* Default to something */
+ break;
}
- if (!p_dev->num_ports_in_engine) {
- DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
-
- /* Default num_ports_in_engine to something */
- p_dev->num_ports_in_engine = 1;
- }
+ p_dev->num_ports = p_dev->num_ports_in_engine *
+ ecore_device_num_engines(p_dev);
}
+#endif
+/* Determine the number of ports of the device and per engine */
static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 addr, global_offsize, global_addr;
- /* Determine the number of ports per engine */
- if (ECORE_IS_BB(p_dev))
- ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
- else
- ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_dev)) {
+ p_dev->num_ports_in_engine = 1;
+ p_dev->num_ports = 2;
+ return;
+ }
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ ecore_emul_hw_info_port_num(p_hwfn, p_ptt);
+ return;
+ }
+#endif
- /* Get the total number of ports of the device */
- if (ECORE_IS_CMT(p_dev)) {
/* In CMT there is always only one port */
+ if (ECORE_IS_CMT(p_dev)) {
+ p_dev->num_ports_in_engine = 1;
p_dev->num_ports = 1;
-#ifndef ASIC_ONLY
- } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
- p_dev->num_ports = p_dev->num_ports_in_engine *
- ecore_device_num_engines(p_dev);
-#endif
- } else {
- u32 addr, global_offsize, global_addr;
+ return;
+ }
addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_GLOBAL);
global_addr = SECTION_ADDR(global_offsize, 0);
addr = global_addr + OFFSETOF(struct public_global, max_ports);
p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
- }
+
+ p_dev->num_ports_in_engine = p_dev->num_ports >>
+ (ecore_device_num_engines(p_dev) - 1);
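+ /* E.g. num_ports = 4 with two engines yields num_ports_in_engine =
+ * 4 >> (2 - 1) = 2, while a single-engine device keeps the two values
+ * equal (shift by zero).
+ */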
}
static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
ecore_mcp_get_capabilities(p_hwfn, p_ptt);
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
-#endif
rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
if (rc != ECORE_SUCCESS)
return rc;
-#ifndef ASIC_ONLY
- }
-#endif
rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
protocol = p_hwfn->mcp_info->func_info.protocol;
p_hwfn->hw_info.personality = protocol;
}
-
#ifndef ASIC_ONLY
- /* To overcome ILT lack for emulation, until at least until we'll have
- * a definite answer from system about it, allow only PF0 to be RoCE.
+ else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ /* AH emulation:
+ * Allow only PF0 to be RoCE to overcome a lack of ILT lines.
*/
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
- if (!p_hwfn->rel_pf_id)
- p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
- else
+ if (ECORE_IS_AH(p_hwfn->p_dev) && p_hwfn->rel_pf_id)
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+ else
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
}
#endif
return rc;
}
+#define ECORE_MAX_DEVICE_NAME_LEN (8)
+
+void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
+{
+ u8 n;
+
+ n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);
+ OSAL_SNPRINTF((char *)name, n, "%s %c%d",
+ ECORE_IS_BB(p_dev) ? "BB" : "AH",
+ 'A' + p_dev->chip_rev, (int)p_dev->chip_metal);
+}
+
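A possible caller-side sketch, assuming an already probed p_dev; the produced string is the chip family plus a revision letter and metal number, e.g. "BB B0" or "AH A1":

    u8 dev_name[8]; /* ECORE_MAX_DEVICE_NAME_LEN is 8, so "AH B0" fits */

    ecore_get_dev_name(p_dev, dev_name, sizeof(dev_name));
    /* chip_rev 1 / chip_metal 0 on an AH device yields "AH B0" */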
static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
}
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_BB(p_dev)) {
/* For some reason we have problems with this register
- * in B0 emulation; Simply assume no CMT
+ * in BB B0 emulation; Simply assume no CMT
*/
DP_NOTICE(p_dev->hwfns, false,
"device on emul - assume no CMT\n");
if (CHIP_REV_IS_EMUL(p_dev)) {
tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
- if (tmp & (1 << 29)) {
- DP_NOTICE(p_hwfn, false,
- "Emulation: Running on a FULL build\n");
- p_dev->b_is_emul_full = true;
- } else {
+
+ /* MISCS_REG_ECO_RESERVED[29]: full/reduced emulation build */
+ p_dev->b_is_emul_full = !!(tmp & (1 << 29));
+
+ /* MISCS_REG_ECO_RESERVED[28]: emulation build w/ or w/o MAC */
+ p_dev->b_is_emul_mac = !!(tmp & (1 << 28));
+
DP_NOTICE(p_hwfn, false,
- "Emulation: Running on a REDUCED build\n");
- }
+ "Emulation: Running on a %s build %s MAC\n",
+ p_dev->b_is_emul_full ? "full" : "reduced",
+ p_dev->b_is_emul_mac ? "with" : "without");
}
#endif
p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
/* First hwfn learns basic information, e.g., number of hwfns */
- if (!p_hwfn->my_id) {
+ if (IS_LEAD_HWFN(p_hwfn)) {
rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
}
}
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && !b_ptt_gtt_init) {
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ u32 val;
+
+ /* Initialize PTT/GTT (done by MFW on ASIC) */
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_START_INIT_PTT_GTT, 1);
+ OSAL_MSLEEP(10);
+ ecore_ptt_invalidate(p_hwfn);
+ val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_INIT_DONE_PTT_GTT);
+ if (val != 1) {
+ DP_ERR(p_hwfn,
+ "PTT and GTT init in PGLUE_B didn't complete\n");
+ goto err1;
+ }
+
+ /* Clear a possible PGLUE_B parity from a previous GRC access */
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_PRTY_STS_WR_H_0, 0x380);
+
+ b_ptt_gtt_init = true;
+ }
+#endif
+
+ /* Store the precompiled init data ptrs */
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_init_iro_array(p_hwfn->p_dev);
+
ecore_hw_hwfn_prepare(p_hwfn);
/* Initialize MCP structure */
/* Check if mdump logs/data are present and update the epoch value */
if (IS_LEAD_HWFN(p_hwfn)) {
-#ifndef ASIC_ONLY
- if (!CHIP_REV_IS_EMUL(p_dev)) {
-#endif
rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
&mdump_info);
if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
p_params->epoch);
-#ifndef ASIC_ONLY
- }
-#endif
}
/* Allocate the init RT array and initialize the init-ops engine */
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_dev)) {
- DP_NOTICE(p_hwfn, false,
- "FPGA: workaround; Prevent DMAE parities\n");
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2,
- 7);
+ if (ECORE_IS_AH(p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround; Prevent DMAE parities\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PCIE_REG_PRTY_MASK_K2, 7);
+ }
DP_NOTICE(p_hwfn, false,
"FPGA: workaround: Set VF bar0 size\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
- /* Store the precompiled init data ptrs */
- if (IS_PF(p_dev))
- ecore_init_iro_array(p_dev);
-
/* Initialize the first hwfn - will learn number of hwfns */
rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
p_dev->doorbells, p_dev->db_phys_addr,
p_params->personality = p_hwfn->hw_info.personality;
- /* initilalize 2nd hwfn if necessary */
+ /* Initialize 2nd hwfn if necessary */
if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
struct ecore_mcp_link_state *p_link;
int rc = ECORE_SUCCESS;
- p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
if (!p_link->min_pf_rate) {
p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
-#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
-#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
-
-#define CDU_VALIDATION_DEFAULT_CFG 61
-
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
- { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
- { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
- { 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+ { 400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
+ { 528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
+ { 608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
{ 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
/* General constants */
-#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
- QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
-#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
- 0)
+#define QM_PQ_MEM_4KB(pq_size) \
+ (pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) \
+ (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
+/* Max link speed (in Mbps) */
+#define QM_MAX_LINK_SPEED 100000
+
/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
#define QM_OTHER_PQS_PER_PF 4
/* VOQ constants */
-#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
+#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
+#define VOQS_BIT_MASK ((1 << MAX_NUM_VOQS) - 1)
/* WFQ constants: */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
-#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6
+#define QM_WFQ_VP_PQ_PF_SHIFT 5
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
-/* Number of VOQs in E5 QmWfqCrd register */
-#define QM_WFQ_CRD_E5_NUM_VOQS 16
-
/* RL constants: */
/* Period in us */
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
-
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
ext_voq * \
} while (0)
#define WRITE_PQ_INFO_TO_RAM 1
-#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
- (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
- ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
-#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
-/******************** INTERNAL IMPLEMENTATION *********************/
+#define PQ_INFO_ELEMENT(vp_pq_id, pf, tc, port, rl_valid, rl_id) \
+ (((vp_pq_id) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+ ((rl_valid ? 1 : 0) << 22) | (((rl_id) & 255) << 24) | \
+ (((rl_id) >> 8) << 9))
-/* Returns the external VOQ number */
-static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
- u8 port_id,
- u8 tc,
- u8 max_phys_tcs_per_port)
-{
- if (tc == PURE_LB_TC)
- return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
- else
- return port_id * (max_phys_tcs_per_port) + tc;
-}
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) (XSEM_REG_FAST_MEMORY + \
+ SEM_FAST_REG_INT_RAM + XSTORM_PQ_INFO_OFFSET(pq_id))
+
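rl_id is now wider than one byte, so the element splits it: the low byte lands in bits [31:24] as before, while the remaining bits are folded in starting at bit 9. A standalone sanity check of that packing with arbitrary example values; EX_PQ_INFO simply restates the macro above and is not part of the driver:

    #include <assert.h>

    #define EX_PQ_INFO(vp, pf, tc, port, rlv, rl) \
            (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
             (((rlv) ? 1 : 0) << 22) | (((rl) & 255u) << 24) | (((rl) >> 8) << 9))

    int main(void)
    {
            /* rl_id 300: low byte 0x2c -> bits [31:24], high bit -> bit 9 */
            assert(EX_PQ_INFO(0x005u, 2u, 3u, 1u, 1, 300u) == 0x2c532205u);
            return 0;
    }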
+/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
- u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
-
/* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
- (u32)voq_bit_mask);
-#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
- if (num_ext_voqs >= 32)
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
- (u32)(voq_bit_mask >> 32));
-#endif
+ VOQS_BIT_MASK);
/* Write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
}
-/* Prepare VPORT RL enable/disable runtime init values */
-static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
+/* Prepare global RL enable/disable runtime init values */
+static void ecore_enable_global_rl(struct ecore_hwfn *p_hwfn,
+ bool global_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
- vport_rl_en ? 1 : 0);
- if (vport_rl_en) {
+ global_rl_en ? 1 : 0);
+ if (global_rl_en) {
/* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
* the specified VOQ
*/
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 ext_voq,
+ u8 voq,
u16 cmdq_lines)
{
- u32 qm_line_crd;
+ u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
-
- OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
- STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
- qm_line_crd);
- STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
- qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, ext_voq, port_id, num_tcs_in_port;
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 tc, voq, port_id, num_tcs_in_port;
/* Clear PBF lines of all VOQs */
- for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
- STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
+ for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u16 phys_lines, phys_lines_per_tc;
continue;
/* Find number of command queue lines to divide between the
- * active physical TCs. In E5, 1/8 of the lines are reserved.
- * the lines for pure LB TC are subtracted.
+ * active physical TCs.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
phys_lines -= PBF_CMDQ_PURE_LB_LINES;
/* Init registers per active TC */
for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
- max_phys_tcs_per_port);
- if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1)
- ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ voq = VOQ(port_id, tc, max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
phys_lines_per_tc);
}
/* Init registers for pure LB TC */
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
PBF_CMDQ_PURE_LB_LINES);
}
}
port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 tc, voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (!port_params[port_id].active)
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >> tc) &
0x1) == 1) {
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
- max_phys_tcs_per_port);
+ voq = VOQ(port_id, tc, max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
}
}
/* Init pure LB TC */
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
pure_lb_blocks);
}
}
+/* Prepare runtime init values for the specified RL.
+ * If global_rl_params is OSAL_NULL, max link speed (100Gbps) is used instead.
+ * Return -1 on error.
+ */
+static int ecore_global_rl_rt_init(struct ecore_hwfn *p_hwfn,
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
+{
+ u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT;
+ u32 inc_val;
+ u16 rl_id;
+
+ /* Go over all global RLs */
+ for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
+ u32 rate_limit = global_rl_params ?
+ global_rl_params[rl_id].rate_limit : 0;
+
+ inc_val = QM_RL_INC_VAL(rate_limit ?
+ rate_limit : QM_MAX_LINK_SPEED);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(QM_MAX_LINK_SPEED)) {
+ DP_NOTICE(p_hwfn, true, "Invalid rate limit configuration.\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u8 max_phys_tcs_per_port,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
- u8 start_vport,
+ u16 start_vport,
u32 base_mem_addr_4kb,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+ #if (WRITE_PQ_INFO_TO_RAM != 0)
+ u32 pq_info = 0;
+ #endif
num_pqs = num_pf_pqs + num_vf_pqs;
/* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
- u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- u8 ext_voq, vport_id_in_pf;
- bool is_vf_pq, rl_valid;
- u16 first_tx_pq_id;
-
- ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
- pq_params[i].tc_id,
- max_phys_tcs_per_port);
+ u16 first_tx_pq_id, vport_id_in_pf;
+ struct qm_rf_pq_map tx_pq_map;
+ bool is_vf_pq;
+ u8 voq;
+
+ voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
- rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - start_vport;
first_tx_pq_id =
vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
- u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
+ u32 map_val = (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (pf_id << QM_WFQ_VP_PQ_PF_SHIFT);
/* Create new VP PQ */
vport_params[vport_id_in_pf].
first_tx_pq_id, map_val);
}
- /* Check RL ID */
- if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter config\n");
- rl_valid = false;
- }
-
/* Prepare PQ map entry */
- struct qm_rf_pq_map tx_pq_map;
-
QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,
- rl_valid ? 1 : 0,
- rl_valid ? pq_params[i].vport_id : 0,
- ext_voq, pq_params[i].wrr_group);
+ pq_params[i].rl_valid, pq_params[i].rl_id,
+ voq, pq_params[i].wrr_group);
/* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
(pq_id * 2) + j, 0);
/* Write PQ info to RAM */
- if (WRITE_PQ_INFO_TO_RAM != 0) {
- u32 pq_info = 0;
-
- pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
- pq_params[i].tc_id,
- pq_params[i].port_id,
- rl_valid ? 1 : 0, rl_valid ?
- pq_params[i].vport_id : 0);
- ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
- pq_info);
- }
+#if (WRITE_PQ_INFO_TO_RAM != 0)
+ pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
+ pq_params[i].tc_id,
+ pq_params[i].port_id,
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id);
+ ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+#endif
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
if (tx_pq_vf_mask[i])
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
i, tx_pq_vf_mask[i]);
+
+ return 0;
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
struct init_qm_pq_params *pq_params)
{
u32 inc_val, crd_reg_offset;
- u8 ext_voq;
+ u8 voq;
u16 i;
inc_val = QM_WFQ_INC_VAL(pf_wfq);
}
for (i = 0; i < num_tx_pqs; i++) {
- ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
- pq_params[i].tc_id,
- max_phys_tcs_per_port);
+ voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
QM_REG_WFQPFCRD_RT_OFFSET :
QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
- ext_voq * MAX_NUM_PFS_BB +
+ voq * MAX_NUM_PFS_BB +
(pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
* Return -1 on error.
*/
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 num_vports,
+ u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id;
+ u16 vp_pq_id, vport_id;
u32 inc_val;
- u8 tc, i;
+ u8 tc;
/* Go over all PF VPORTs */
- for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].wfq)
+ for (vport_id = 0; vport_id < num_vports; vport_id++) {
+ if (!vport_params[vport_id].wfq)
continue;
- inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
+ inc_val = QM_WFQ_INC_VAL(vport_params[vport_id].wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT WFQ weight configuration\n");
/* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- vport_pq_id = vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vport_pq_id, inc_val);
- }
- }
- }
- return 0;
-}
-
-/* Prepare VPORT RL runtime init values for the specified VPORTs.
- * Return -1 on error.
- */
-static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
- u8 start_vport,
- u8 num_vports,
- u32 link_speed,
- struct init_qm_vport_params *vport_params)
-{
- u8 i, vport_id;
- u32 inc_val;
-
- if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration\n");
- return -1;
- }
-
- /* Go over all PF VPORTs */
- for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- inc_val = QM_RL_INC_VAL(link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration\n");
- return -1;
+ vp_pq_id = vport_params[vport_id].first_tx_pq_id[tc];
+ if (vp_pq_id == QM_INVALID_PQ_ID)
+ continue;
+
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vp_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vp_pq_id, inc_val);
}
-
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_VP_RL_UPPER_BOUND(link_speed) |
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
- inc_val);
}
return 0;
return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
-
/******************** INTERFACE IMPLEMENTATION *********************/
-u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,
+ u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u8 max_phys_tcs_per_port,
bool pf_rl_en,
bool pf_wfq_en,
- bool vport_rl_en,
+ bool global_rl_en,
bool vport_wfq_en,
struct init_qm_port_params
- port_params[MAX_NUM_PORTS])
+ port_params[MAX_NUM_PORTS],
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])
{
- u32 mask;
+ u32 mask = 0;
/* Init AFullOprtnstcCrdMask */
- mask = (QM_OPPOR_LINE_VOQ_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
+ QM_OPPOR_LINE_VOQ_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, pf_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, vport_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, global_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY,
+ QM_OPPOR_PQ_EMPTY_DEF);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
/* Enable/disable PF RL */
/* Enable/disable PF WFQ */
ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
- /* Enable/disable VPORT RL */
- ecore_enable_vport_rl(p_hwfn, vport_rl_en);
+ /* Enable/disable global RL */
+ ecore_enable_global_rl(p_hwfn, global_rl_en);
/* Enable/disable VPORT WFQ */
ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
+ ecore_global_rl_rt_init(p_hwfn, global_rl_params);
+
return 0;
}
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
- u8 start_vport,
- u8 num_vports,
+ u16 start_vport,
+ u16 num_vports,
u16 pf_wfq,
u32 pf_rl,
- u32 link_speed,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
u32 other_mem_size_4kb;
- u8 tc, i;
+ u16 vport_id;
+ u8 tc;
other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
QM_OTHER_PQS_PER_PF;
/* Clear first Tx PQ ID array for each VPORT */
- for (i = 0; i < num_vports; i++)
+ for (vport_id = 0; vport_id < num_vports; vport_id++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
- vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+ vport_params[vport_id].first_tx_pq_id[tc] =
+ QM_INVALID_PQ_ID;
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
#endif
/* Map Tx PQs */
- ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
- is_pf_loading, num_pf_cids, num_vf_cids,
- start_pq, num_pf_pqs, num_vf_pqs, start_vport,
- other_mem_size_4kb, pq_params, vport_params);
+ if (ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
+ is_pf_loading, num_pf_cids, num_vf_cids,
+ start_pq, num_pf_pqs, num_vf_pqs,
+ start_vport, other_mem_size_4kb, pq_params,
+ vport_params))
+ return -1;
/* Init PF WFQ */
if (pf_wfq)
if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
return -1;
- /* Set VPORT WFQ */
+ /* Init VPORT WFQ */
if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
return -1;
- /* Set VPORT RL */
- if (ecore_vport_rl_rt_init
- (p_hwfn, start_vport, num_vports, link_speed, vport_params))
- return -1;
-
return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+ u16 first_tx_pq_id[NUM_OF_TCS],
+ u16 wfq)
{
- u16 vport_pq_id;
+ u16 vp_pq_id;
u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT WFQ weight configuration\n");
return -1;
}
+ /* A VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- vport_pq_id = first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
+ vp_pq_id = first_tx_pq_id[tc];
+ if (vp_pq_id != QM_INVALID_PQ_ID) {
ecore_wr(p_hwfn, p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
+ QM_REG_WFQVPWEIGHT + vp_pq_id * 4, inc_val);
}
}
+ return 0;
+ }
+
+int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rl_id,
+ u32 rate_limit)
+{
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(rate_limit);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
+ DP_NOTICE(p_hwfn, true, "Invalid rate limit configuration.\n");
+ return -1;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
+
return 0;
}
return true;
}
+#ifndef UNUSED_HSI_FUNC
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
}
}
+#endif /* UNUSED_HSI_FUNC */
+
+#ifndef UNUSED_HSI_FUNC
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
}
}
+#endif /* UNUSED_HSI_FUNC */
+#ifndef UNUSED_HSI_FUNC
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
}
}
-/* In MF should be called once per port to set EtherType of OuterTag */
+#endif /* UNUSED_HSI_FUNC */
+#ifndef UNUSED_HSI_FUNC
+
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ for (i = 0; i < (arr_size); i++) \
+ ecore_wr(dev, ptt, ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
+ } while (0)
+
+#ifndef DWORDS_TO_BYTES
+#define DWORDS_TO_BYTES(dwords) ((dwords) * REG_SIZE)
+#endif
+
+
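ARR_REG_WR is the GRC fallback used below: one ecore_wr() per dword at consecutive 4-byte offsets. A hedged expansion sketch for a two-dword array, where base_addr and the values are placeholders:

    u32 vals[2] = { 0x11111111, 0x22222222 };

    /* Equivalent to:
     *   ecore_wr(p_hwfn, p_ptt, base_addr + 0, vals[0]);
     *   ecore_wr(p_hwfn, p_ptt, base_addr + 4, vals[1]);
     */
    ARR_REG_WR(p_hwfn, p_ptt, base_addr, vals, 2);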
+/**
+ * @brief ecore_dmae_to_grc - internal helper that writes from host memory to
+ * wide-bus registers (split registers are not supported yet)
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pData - pointer to source data.
+ * @param addr - Destination register address.
+ * @param len_in_dwords - data length in DWORDS (u32)
+ */
+static int ecore_dmae_to_grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *pData,
+ u32 addr,
+ u32 len_in_dwords)
+{
+ struct dmae_params params;
+ bool read_using_dmae = false;
+
+ if (!pData)
+ return -1;
+
+ /* Set DMAE params */
+ OSAL_MEMSET(&params, 0, sizeof(params));
+
+ SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ read_using_dmae = !ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)(pData),
+ addr, len_in_dwords, &params);
+ if (!read_using_dmae)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+
+ /* If the DMAE write failed, fall back to writing via GRC */
+ if (!read_using_dmae)
+ /* write to registers using GRC */
+ ARR_REG_WR(p_hwfn, p_ptt, addr, pData, len_in_dwords);
+
+ return len_in_dwords;
+}
+
+/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
/* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
+#endif /* UNUSED_HSI_FUNC */
+
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
ip_geneve_enable ? 1 : 0);
}
-#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
/* set VXLAN_NO_L2_ENABLE flag */
reg_val |= cfg_mask;
- /* update PRS FIC register */
+ /* update PRS FIC Format register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
- } else {
/* clear VXLAN_NO_L2_ENABLE flag */
reg_val &= ~cfg_mask;
}
ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
+#ifndef UNUSED_HSI_FUNC
+
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
struct ecore_ptt *p_ptt,
u16 pf_id)
{
+ struct regpair ram_line;
+ OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
+
/* disable gft search for PF */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * pf_id, 0);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
+ ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
+
}
bool ipv6,
enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
+ u32 reg_val, cam_line, search_non_ip_as_gft;
+ struct regpair ram_line = { 0 };
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - ipv4 or ipv6'\n");
PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ram_line_lo = 0;
- ram_line_hi = 0;
/* Search no IP as GFT */
search_non_ip_as_gft = 0;
/* Tunnel type */
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
/* Allow tunneled traffic without inner IP */
search_non_ip_as_gft = 1;
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
search_non_ip_as_gft);
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ram_line_lo);
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
- REG_SIZE, ram_line_hi);
+ ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
/* Set default profile so that no filter match will happen */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+ ram_line.lo = 0xffffffff;
+ ram_line.hi = 0x3ff;
+ ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH,
+ sizeof(ram_line) / REG_SIZE);
/* Enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
+
+#endif /* UNUSED_HSI_FUNC */
+
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mode,
/* Calculate and return CDU validation byte per connection type / region /
* cid
*/
-static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
+ u8 conn_type, u8 region, u32 cid)
{
- const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
-
static u8 crc8_table_valid; /*automatically initialized to 0*/
u8 crc, validation_byte = 0;
u32 validation_string = 0;
* [7:4] = Region
* [3:0] = Type
*/
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
- validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
- validation_string |= ((region & 0xF) << 4);
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
+ CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+#endif
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
- validation_string |= (conn_type & 0xF);
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
+ CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+#endif
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
+ CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+#endif
/* Convert to big-endian and calculate CRC8*/
data_to_crc = OSAL_BE32_TO_CPU(validation_string);
* [6:3] = connection_type[3:0]
* [2:0] = crc[2:0]
*/
-
- validation_byte |= ((validation_cfg >>
+ validation_byte |= ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >>
CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
- if ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
- validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
- else
- validation_byte |= crc & 0x7F;
-
+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+#else
+ validation_byte |= crc & 0x7F;
+#endif
return validation_byte;
}
/* Calcualte and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem, u16 ctx_size,
u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
p_ctx = (u8 *)p_ctx_mem;
+
x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+ *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
}
/* Calcualte and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
- u32 tid)
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 1,
+ tid);
}
/* Memset session context to 0 while preserving validation bytes */
-void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
+ u32 ctx_size, u8 ctx_type)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
u8 x_val, t_val, u_val;
p_ctx = (u8 *)p_ctx_mem;
+
x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
}
/* Memset task context to 0 while preserving validation bytes */
-void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
+ u32 ctx_size, u8 ctx_type)
{
u8 *p_ctx, *region1_val_ptr;
u8 region1_val;
{
u32 ctx_validation;
- /* Enable validation for connection region 3 - bits [31:24] */
- ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+ ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 24;
ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
- /* Enable validation for connection region 5 - bits [15: 8] */
- ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+ ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8;
ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
- /* Enable validation for connection region 1 - bits [15: 8] */
- ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+ ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8;
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
-
-
-/*******************************************************************************
- * File name : rdma_init.c
- * Author : Michael Shteinbok
- *******************************************************************************
- *******************************************************************************
- * Description:
- * RDMA HSI functions
- *
- *******************************************************************************
- * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
- *
- *******************************************************************************
- */
-static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
- u8 storm_id)
-{
- switch (storm_id) {
- case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
- case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
- case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
- case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
- case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
- case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
- PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
-
- default: return 0;
- }
-}
-
-void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 assert_level[NUM_STORMS])
-{
- u8 storm_id;
- for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
- u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);
-
- ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
- }
-}
#ifndef _INIT_FW_FUNCS_H
#define _INIT_FW_FUNCS_H
-/* Forward declarations */
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
+
+/* Physical memory descriptor */
+struct phys_mem_desc {
+ dma_addr_t phys_addr;
+ void *virt_addr;
+ u32 size; /* In bytes */
+};
+
+/* Returns the VOQ based on port and TC */
+#define VOQ(port, tc, max_phys_tcs_per_port) \
+ ((tc) == PURE_LB_TC ? NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + (port) : \
+ (port) * (max_phys_tcs_per_port) + (tc))
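A short illustration of the mapping, with a hypothetical 4-TC port layout; PURE_LB_TC VOQs sit in a separate block past all physical (port, tc) pairs:

    /* Physical TC: port 1, tc 3, 4 phys TCs per port -> VOQ 1 * 4 + 3 = 7 */
    u8 voq = VOQ(1, 3, 4);

    /* Pure LB: indexed by port only, offset past the physical VOQ range */
    u8 lb_voq = VOQ(1, PURE_LB_TC, 4);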
struct init_qm_pq_params;
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
+ * @param p_hwfn - HW device data
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
*
* @return The required host memory size in 4KB units.
*/
-u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,
+ u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
* @param pf_rl_en - enable per-PF rate limiters
* @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param global_rl_en - enable global rate limiters
* @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params - array of size MAX_NUM_PORTS with params for each port
+ * @param port_params - array with parameters for each port.
+ * @param global_rl_params - array with parameters for each global RL.
+ * If OSAL_NULL, global RLs are not configured.
*
* @return 0 on success, -1 on error.
*/
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- bool pf_rl_en,
- bool pf_wfq_en,
- bool vport_rl_en,
- bool vport_wfq_en,
- struct init_qm_port_params port_params[MAX_NUM_PORTS]);
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool global_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS],
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]);
/**
* @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
* be 0. otherwise, the weight must be non-zero.
* @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
* configure. ignored if PF RL is globally disabled.
- * @param link_speed - link speed in Mbps.
* @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
* each Tx PQ associated with the specified PF.
* @param vport_params - array of size num_vports with parameters for each
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
- u8 start_vport,
- u8 num_vports,
+ u16 start_vport,
+ u16 num_vports,
u16 pf_wfq,
u32 pf_rl,
- u32 link_speed,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
* @param first_tx_pq_id- An array containing the first Tx PQ ID associated
* with the VPORT for each TC. This array is filled by
* ecore_qm_pf_rt_init
- * @param vport_wfq - WFQ weight. Must be non-zero.
+ * @param wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS],
- u16 vport_wfq);
+ u16 wfq);
+
+/**
+ * @brief ecore_init_global_rl - Initializes the rate limit of the specified
+ * rate limiter.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param rl_id - RL ID
+ * @param rate_limit - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rl_id,
+ u32 rate_limit);
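A hypothetical usage sketch; the RL ID and the 25000 Mb/sec (25G) cap are arbitrary example values:

    if (ecore_init_global_rl(p_hwfn, p_ptt, 5, 25000))
            DP_NOTICE(p_hwfn, true, "Failed to configure global RL 5\n");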
/**
* @brief ecore_init_vport_rl - Initializes the rate limit of the specified
/**
* @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
- * port
+ * port.
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - vxlan destination udp port.
*/
/**
* @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param vxlan_enable - vxlan enable flag.
*/
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param eth_gre_enable - eth GRE enable enable flag.
* @param ip_gre_enable - IP GRE enable enable flag.
* @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
* udp port
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - geneve destination udp port.
*/
u16 dest_port);
/**
- * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ * @brief ecore_set_geneve_enable - enable or disable GENEVE tunnel in HW
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param eth_geneve_enable - eth GENEVE enable enable flag.
* @param ip_geneve_enable - IP GENEVE enable enable flag.
struct ecore_ptt *p_ptt);
/**
- * @brief ecore_gft_disable - Disable and GFT
+ * @brief ecore_gft_disable - Disable GFT
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
/**
* @brief ecore_gft_config - Enable and configure HW for GFT
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to enable GFT.
* @param tcp - set profile tcp packets.
* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
* used before first ETH queue started.
*
-*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers. Don't care
-* if runtime_init used
+ * if runtime_init used.
* @param mode - VF zone size mode. Use enum vf_zone_size_mode.
-* @param runtime_init - Set 1 to init runtime registers in engine phase. Set 0
-* if VF zone size mode configured after engine phase.
+ * @param runtime_init - Set 1 to init runtime registers in engine phase.
+ * Set 0 if VF zone size mode configured after engine
+ * phase.
*/
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
*p_ptt, u16 mode, bool runtime_init);
* @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
* VF zone size mode.
*
+ * @param p_hwfn - HW device data
* @param stat_cnt_id - statistic counter id
* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
*/
* @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
* size mode.
*
+ * @param p_hwfn - HW device data
* @param vf_id - vf id.
* @param vf_queue_id - per VF rx queue id.
* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
* @brief ecore_enable_context_validation - Enable and configure context
* validation.
*
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
*/
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
* @brief ecore_calc_session_ctx_validation - Calcualte validation byte for
* session context.
*
+ * @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param cid - context cid.
*/
-void ecore_calc_session_ctx_validation(void *p_ctx_mem,
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 cid);
* @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
* context.
*
+ * @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param tid - context tid.
*/
-void ecore_calc_task_ctx_validation(void *p_ctx_mem,
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 tid);
* @param ctx_size - size to initialzie.
* @param ctx_type - context type.
*/
-void ecore_memset_session_ctx(void *p_ctx_mem,
+void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
+
/**
* @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
* validation bytes.
*
+ * @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - size to initialzie.
* @param ctx_type - context type.
*/
-void ecore_memset_task_ctx(void *p_ctx_mem,
+void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
#include "ecore_iro_values.h"
#include "ecore_sriov.h"
-#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
- p_dev->iro_arr = iro_arr;
+ p_dev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}
/* Runtime configuration helpers */
int phase, int phase_id, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ bool b_dmae = (phase != PHASE_ENGINE);
u32 cmd_num, num_init_ops;
union init_op *init;
- bool b_dmae = false;
enum _ecore_status_t rc = ECORE_SUCCESS;
num_init_ops = p_dev->fw_data->init_ops_size;
case INIT_OP_IF_PHASE:
cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
phase_id);
- b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* ecore_init_run is always invoked from
case INIT_OP_CALLBACK:
rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ if (phase == PHASE_ENGINE &&
+ cmd->callback.callback_id == DMAE_READY_CB)
+ b_dmae = true;
break;
}
fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
+ offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
+ fw->fw_overlays = (u32 *)(fw_data + offset);
+ len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
+ fw->fw_overlays_len = len;
#else
fw->init_ops = (union init_op *)init_ops;
fw->arr_data = (u32 *)init_val;
fw->modes_tree_buf = (u8 *)modes_tree_buf;
fw->init_ops_size = init_ops_size;
+ fw->fw_overlays = fw_overlays;
+ fw->fw_overlays_len = sizeof(fw_overlays);
#endif
return ECORE_SUCCESS;
osal_size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
- ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+ ecore_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))
#endif /* __ECORE_INIT_OPS__ */
struct ecore_sb_sp_info {
struct ecore_sb_info sb_info;
- /* per protocol index data */
+
+ /* Per protocol index data */
struct ecore_pi_info pi_info_arr[MAX_PIS_PER_SB];
+ osal_size_t pi_info_arr_size;
};
enum ecore_attention_type {
#define ATTENTION_OFFSET_MASK (0x000ff000)
#define ATTENTION_OFFSET_SHIFT (12)
-#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_MASK (0xf)
#define ATTENTION_BB_SHIFT (20)
#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
-#define ATTENTION_BB_DIFFERENT (1 << 23)
+#define ATTENTION_BB_DIFFERENT (1 << 24)
#define ATTENTION_CLEAR_ENABLE (1 << 28)
unsigned int flags;
AEU_INVERT_REG_SPECIAL_CNIG_1,
AEU_INVERT_REG_SPECIAL_CNIG_2,
AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MCP_UMP_TX,
+ AEU_INVERT_REG_SPECIAL_MCP_SCPAD,
AEU_INVERT_REG_SPECIAL_MAX,
};
{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
};
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
{"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
{"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
- {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- MAX_BLOCK_ID},
+ {"OPTE", ATTENTION_PAR, OSAL_NULL, BLOCK_OPTE},
+ {"MCP", ATTENTION_PAR, OSAL_NULL, BLOCK_MCP},
+ {"MS", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS},
+ {"UMAC", ATTENTION_SINGLE, OSAL_NULL, BLOCK_UMAC},
+ {"LED", ATTENTION_SINGLE, OSAL_NULL, BLOCK_LED},
+ {"BMBN", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMBN},
{"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
{"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
+ {"BMB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
{"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
{"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
{"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
{"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
{"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
MAX_BLOCK_ID},
- {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
- {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- MAX_BLOCK_ID},
+ {"AVS", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL,
+ BLOCK_AVS_WRAP},
+ {"AVS", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL,
+ BLOCK_AVS_WRAP},
+ {"PCIe core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIe link up", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIe hot reset", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"Reserved %d", (9 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
}
},
/* @DPDK */
/* Reach assertion if attention is fatal */
if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
+#ifndef ASIC_ONLY
+ DP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev),
+ "`%s': Fatal attention\n", p_bit_name);
+#else
DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
p_bit_name);
+#endif
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
/* Prevent this Attention from being asserted in the future */
if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
+#ifndef ASIC_ONLY
+ CHIP_REV_IS_EMUL(p_hwfn->p_dev) ||
+#endif
p_hwfn->p_dev->attn_clr_en) {
u32 val;
u32 mask = ~bitmask;
p_aeu->bit_name);
}
+#define MISC_REG_AEU_AFTER_INVERT_IGU(n) \
+ (MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4)
+
+#define MISC_REG_AEU_ENABLE_IGU_OUT(n, group) \
+ (MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \
+ (group) * 0x4 * NUM_ATTN_REGS)
+
/**
* @brief - handles deassertion of previously asserted attentions.
*
/* Read the attention registers in the AEU */
for (i = 0; i < NUM_ATTN_REGS; i++) {
aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- MISC_REG_AEU_AFTER_INVERT_1_IGU +
- i * 0x4);
+ MISC_REG_AEU_AFTER_INVERT_IGU(i));
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
"Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
}
struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
u32 parities;
- aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+ aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, 0);
en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
for (i = 0; i < NUM_ATTN_REGS; i++) {
u32 bits;
- aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32) +
- k * sizeof(u32) * NUM_ATTN_REGS;
+ aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, k);
en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
bits = aeu_inv_arr[i] & en;
struct ecore_pi_info *pi_info = OSAL_NULL;
struct ecore_sb_attn_info *sb_attn;
struct ecore_sb_info *sb_info;
- int arr_size;
u16 rc = 0;
if (!p_hwfn)
}
sb_info = &p_hwfn->p_sp_sb->sb_info;
- arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
if (!sb_info) {
DP_ERR(p_hwfn->p_dev,
"Status block is NULL - cannot ack interrupts\n");
ecore_int_attentions(p_hwfn);
if (rc & ECORE_SB_IDX) {
- int pi;
+ osal_size_t pi;
/* Since we only looked at the SB index, it's possible more
* than a single protocol-index on the SB incremented.
* Iterate over all configured protocol indices and check
* whether something happened for each.
*/
- for (pi = 0; pi < arr_size; pi++) {
+ for (pi = 0; pi < p_hwfn->p_sp_sb->pi_info_arr_size; pi++) {
pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
if (pi_info->comp_cb != OSAL_NULL)
pi_info->comp_cb(p_hwfn, pi_info->cookie);
if (IS_VF(p_hwfn->p_dev))
return;/* @@@TBD MichalK- VF CAU... */
- sb_offset = igu_sb_id * MAX_PIS_PER_SB;
+ sb_offset = igu_sb_id * PIS_PER_SB;
OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
{
/* zero status block and ack counter */
sb_info->sb_ack = 0;
- OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+ OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);
if (IS_PF(p_hwfn->p_dev))
ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
dma_addr_t sb_phy_addr, u16 sb_id)
{
- sb_info->sb_virt = sb_virt_addr;
+ struct status_block *sb_virt;
+
+ sb_info->sb_virt = sb_virt_addr;
+ sb_virt = (struct status_block *)sb_info->sb_virt;
+
+ sb_info->sb_size = sizeof(*sb_virt);
+ sb_info->sb_pi_array = sb_virt->pi_array;
+ sb_info->sb_prod_index = &sb_virt->prod_index;
+
sb_info->sb_phys = sb_phy_addr;
sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
- if (IS_PF(p_hwfn->p_dev)) {
+ if (IS_PF(p_hwfn->p_dev))
sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
- } else {
- sb_info->igu_addr =
- (u8 OSAL_IOMEM *)p_hwfn->regview +
+ else
+ sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
PXP_VF_BAR0_START_IGU +
- ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
- }
+ ((IGU_CMD_INT_ACK_BASE +
+ sb_info->igu_sb_id) << 3);
sb_info->flags |= ECORE_SB_INFO_INIT;
/* zero status block and ack counter */
sb_info->sb_ack = 0;
- OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+ OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);
if (IS_VF(p_hwfn->p_dev)) {
ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
void *p_virt;
/* SB struct */
- p_sb =
- OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(*p_sb));
+ p_sb = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
- DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_sb_info'\n");
return ECORE_NOMEM;
}
ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
p_virt, p_phys, ECORE_SP_SB_ID);
- OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+ p_sb->pi_info_arr_size = PIS_PER_SB;
return ECORE_SUCCESS;
}
u8 pi;
/* Look for a free index */
- for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ for (pi = 0; pi < p_sp_sb->pi_info_arr_size; pi++) {
if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
continue;
p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
p_sp_sb->pi_info_arr[pi].cookie = cookie;
*sb_idx = pi;
- *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ *p_fw_cons = &p_sp_sb->sb_info.sb_pi_array[pi];
rc = ECORE_SUCCESS;
break;
}
bool cleanup_set,
u16 opaque_fid)
{
- u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
- u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
- u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
- u8 type = 0; /* FIXME MichalS type??? */
+ u32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr, pxp_addr;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH, val;
+ u8 type = 0;
OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
IGU_REG_CLEANUP_STATUS_0) != 0x200);
SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
/* Set the control register */
+ pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
igu_sb_id);
/* Clear the CAU for the SB */
- for (pi = 0; pi < 12; pi++)
+ for (pi = 0; pi < PIS_PER_SB; pi++)
ecore_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
+ CAU_REG_PI_MEMORY +
+ (igu_sb_id * PIS_PER_SB + pi) * 4,
+ 0);
}
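/* A standalone sketch of the CAU PI addressing used by the loop above: each
 * status block owns PIS_PER_SB consecutive 32-bit PI entries, so entry "pi"
 * of status block "igu_sb_id" sits at CAU_REG_PI_MEMORY +
 * (igu_sb_id * PIS_PER_SB + pi) * 4. The value 12 mirrors the literal the
 * replaced code used and is assumed here for illustration.
 */
#include <stdint.h>

#define SKETCH_PIS_PER_SB	12	/* assumed, as in the old literal */

static inline uint32_t sketch_cau_pi_offset(uint16_t igu_sb_id, uint8_t pi)
{
	return ((uint32_t)igu_sb_id * SKETCH_PIS_PER_SB + pi) * 4;
}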
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info_dbg *p_info)
{
u16 sbid = p_sb->igu_sb_id;
- int i;
+ u32 i;
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
- if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
+ if (sbid >= NUM_OF_SBS(p_hwfn->p_dev))
return ECORE_INVAL;
p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
IGU_REG_CONSUMER_MEM + sbid * 4);
- for (i = 0; i < MAX_PIS_PER_SB; i++)
+ for (i = 0; i < PIS_PER_SB; i++)
p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY +
- sbid * 4 * MAX_PIS_PER_SB +
+ sbid * 4 * PIS_PER_SB +
i * 4);
return ECORE_SUCCESS;
#endif
struct ecore_sb_info {
- struct status_block *sb_virt;
+ void *sb_virt; /* ptr to "struct status_block_e{4,5}" */
+ u32 sb_size; /* size of "struct status_block_e{4,5}" */
+ __le16 *sb_pi_array; /* ptr to "sb_virt->pi_array" */
+ __le32 *sb_prod_index; /* ptr to "sb_virt->prod_index" */
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
struct ecore_sb_info_dbg {
u32 igu_prod;
u32 igu_cons;
- u16 pi[MAX_PIS_PER_SB];
+ u16 pi[PIS_PER_SB];
};
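/* A standalone sketch of how the producer value read through sb_prod_index
 * is consumed further below: only the low 24 bits carry the producer index,
 * so the CPU-order value is masked with STATUS_BLOCK_PROD_INDEX_MASK before
 * being compared against the last acknowledged value kept in sb_ack.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool sketch_sb_prod_changed(uint32_t prod_index_cpu,
					  uint32_t last_ack)
{
	uint32_t prod = prod_index_cpu & 0xFFFFFF; /* 24-bit producer index */

	return prod != last_ack;
}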
struct ecore_sb_cnt_info {
/* barrier(); status block is written to by the chip */
/* FIXME: need some sort of barrier. */
- prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
+ prod = OSAL_LE32_TO_CPU(*sb_info->sb_prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid, u32 rate)
{
- struct ecore_mcp_link_state *p_link;
+ u16 rl_id;
u8 vport;
vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
- p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"About to rate limit qm vport %d for queue %d with rate %d\n",
vport, p_cid->rel.queue_id, rate);
- return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
- p_link->speed);
+ rl_id = vport; /* The "rl_id" is set to the "vport_id" */
+ return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, rate);
}
#define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT 100
if (rc != ECORE_SUCCESS)
return rc;
- addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
+ addr = (u8 *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id);
*(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);
bool b_err_big_pkt;
bool b_err_anti_spoof;
bool b_err_ctrl_frame;
+ bool b_en_rgfs;
+ bool b_en_tgfs;
};
/**
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"
-#define CHIP_MCP_RESP_ITER_US 10
-#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
#define GRCBASE_MCP 0xe00000
+#define ECORE_MCP_RESP_ITER_US 10
#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+#ifndef ASIC_ONLY
+/* Non-ASIC:
+ * The waiting interval is multiplied by 100 to reduce the impact of the
+ * built-in delay of 100usec in each ecore_rd().
+ * In addition, a factor of 4 compared to ASIC is applied.
+ */
+#define ECORE_EMUL_MCP_RESP_ITER_US (ECORE_MCP_RESP_ITER_US * 100)
+#define ECORE_EMUL_DRV_MB_MAX_RETRIES ((ECORE_DRV_MB_MAX_RETRIES / 100) * 4)
+#define ECORE_EMUL_MCP_RESET_RETRIES ((ECORE_MCP_RESET_RETRIES / 100) * 4)
+#endif
+
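/* A standalone check of the scaling above: the ASIC mailbox budget is
 * 500000 iterations x 10 usec = 5 sec, while the emulation budget becomes
 * (500000 / 100) * 4 = 20000 iterations x 1000 usec = 20 sec, i.e. four
 * times the ASIC budget, matching the comment above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int asic_retries = 500 * 1000, asic_usec = 10;
	unsigned int emul_retries = (asic_retries / 100) * 4;
	unsigned int emul_usec = asic_usec * 100;

	printf("ASIC budget: %u sec\n", asic_retries * asic_usec / 1000000);
	printf("Emulation budget: %u sec\n",
	       emul_retries * emul_usec / 1000000);
	return 0;
}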
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
_val)
struct ecore_ptt *p_ptt)
{
struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize, val;
u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
- u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+ val = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE);
+ p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base) {
+ DP_NOTICE(p_hwfn, false,
+ "The address of the MCP scratch-pad is not configured\n");
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
- p_info->public_base = 0;
- return ECORE_INVAL;
- }
+ /* Zeroed "public_base" implies no MFW */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ DP_INFO(p_hwfn, "Emulation: Assume no MFW\n");
#endif
-
- p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
- if (!p_info->public_base)
return ECORE_INVAL;
+ }
p_info->public_base |= GRCBASE_MCP;
if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
- /* Do not free mcp_info here, since public_base indicate that
+ /* Do not free mcp_info here, since "public_base" indicates that
* the MCP is not initialized
*/
return ECORE_SUCCESS;
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+ u32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0;
+ u32 retries = ECORE_MCP_RESET_RETRIES;
enum _ecore_status_t rc = ECORE_SUCCESS;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ delay = ECORE_EMUL_MCP_RESP_ITER_US;
+ retries = ECORE_EMUL_MCP_RESET_RETRIES;
+ }
#endif
-
if (p_hwfn->mcp_info->b_block_cmd) {
DP_NOTICE(p_hwfn, false,
"The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
/* Ensure that only a single thread is accessing the mailbox */
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ prev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
/* Set drv command along with the updated sequence */
ecore_mcp_reread_offsets(p_hwfn, p_ptt);
seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+ /* Give the MFW up to 500 msec (50 * 1000 * 10 usec) to resume */
do {
- /* Wait for MFW response */
OSAL_UDELAY(delay);
- /* Give the FW up to 500 second (50*1000*10usec) */
- } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
- MISCS_REG_GENERIC_POR_0)) &&
- (cnt++ < ECORE_MCP_RESET_RETRIES));
- if (org_mcp_reset_seq !=
- ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
+ prev_generic_por_0)
+ break;
+ } while (cnt++ < retries);
+
+ if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
+ prev_generic_por_0) {
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"MCP was reset after %d usec\n", cnt * delay);
} else {
return rc;
}
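/* A standalone sketch of the reset-detection pattern used above: snapshot the
 * generation register, issue the command, then poll until the value differs
 * from the snapshot or the retry budget is exhausted. The "rd" and "udelay"
 * callbacks stand in for ecore_rd() and OSAL_UDELAY().
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool sketch_poll_for_change(uint32_t (*rd)(void *ctx),
					  void (*udelay)(unsigned int usecs),
					  void *ctx, uint32_t snapshot,
					  unsigned int retries,
					  unsigned int delay_us)
{
	unsigned int cnt;

	for (cnt = 0; cnt < retries; cnt++) {
		udelay(delay_us);
		if (rd(ctx) != snapshot)
			return true;	/* a new value was posted */
	}

	return false;	/* timed out */
}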
+#ifndef ASIC_ONLY
+static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params)
+{
+ if (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) !=
+ 1 /* ECORE_LOAD_REQ_HSI_VER_1 */) {
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1;
+ return;
+ }
+
+ if (!loaded)
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
+ else if (!loaded_port[p_hwfn->port_id])
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+ /* On CMT, always report the ENGINE load phase */
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ loaded++;
+ loaded_port[p_hwfn->port_id]++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\n",
+ p_mb_params->mcp_resp, loaded, p_hwfn->port_id,
+ loaded_port[p_hwfn->port_id]);
+}
+
+static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn)
+{
+ loaded--;
+ loaded_port[p_hwfn->port_id]--;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded);
+}
+
+static enum _ecore_status_t
+ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params)
+{
+ if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ switch (p_mb_params->cmd) {
+ case DRV_MSG_CODE_LOAD_REQ:
+ ecore_emul_mcp_load_req(p_hwfn, p_mb_params);
+ break;
+ case DRV_MSG_CODE_UNLOAD_REQ:
+ ecore_emul_mcp_unload_req(p_hwfn);
+ break;
+ case DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT:
+ case DRV_MSG_CODE_RESOURCE_CMD:
+ case DRV_MSG_CODE_MDUMP_CMD:
+ case DRV_MSG_CODE_GET_ENGINE_CONFIG:
+ case DRV_MSG_CODE_GET_PPFID_BITMAP:
+ return ECORE_NOTIMPL;
+ default:
+ break;
+ }
+
+ return ECORE_SUCCESS;
+}
+#endif
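/* A standalone sketch of the load-phase decision in ecore_emul_mcp_load_req()
 * above: the first function on the engine reports ENGINE, the first on its
 * port reports PORT, everything else reports FUNCTION, and CMT devices always
 * report ENGINE. The enum and parameters are illustrative stand-ins for the
 * FW_MSG_CODE_DRV_LOAD_* values and the "loaded"/"loaded_port" counters.
 */
enum sketch_load_phase {
	SKETCH_LOAD_ENGINE,
	SKETCH_LOAD_PORT,
	SKETCH_LOAD_FUNCTION,
};

static inline enum sketch_load_phase
sketch_emul_load_phase(unsigned int engine_load_cnt,
		       unsigned int port_load_cnt, int is_cmt)
{
	if (is_cmt || !engine_load_cnt)
		return SKETCH_LOAD_ENGINE;
	if (!port_load_cnt)
		return SKETCH_LOAD_PORT;
	return SKETCH_LOAD_FUNCTION;
}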
+
/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt *p_ptt)
{
u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+ u32 delay = ECORE_MCP_RESP_ITER_US;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = ECORE_EMUL_MCP_RESP_ITER_US;
+#endif
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
- OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ OSAL_UDELAY(delay);
cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
- OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ OSAL_UDELAY(delay);
cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
DP_NOTICE(p_hwfn, false,
{
osal_size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
- u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 usecs = ECORE_MCP_RESP_ITER_US;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
- /* There is a built-in delay of 100usec in each MFW response read */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- max_retries /= 10;
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn))
+ return ecore_emul_mcp_cmd(p_hwfn, p_mb_params);
+
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ max_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES;
+ usecs = ECORE_EMUL_MCP_RESP_ITER_US;
+ }
#endif
+ if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+ max_retries = DIV_ROUND_UP(max_retries, 1000);
+ usecs *= 1000;
+ }
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
}
return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- delay);
+ usecs);
}
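/* A standalone sketch of the CAN_SLEEP rescaling above: dividing the retry
 * count by 1000 (rounded up) while multiplying the interval by 1000 keeps the
 * overall wait budget the same (5000000 usec for the ASIC defaults) and only
 * trades many short busy-waits for fewer, longer sleeps. DIV_ROUND_UP is
 * redefined locally just to keep the example self-contained.
 */
#define SKETCH_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static inline unsigned long sketch_wait_budget_usec(unsigned int retries,
						    unsigned int usecs,
						    int can_sleep)
{
	if (can_sleep) {
		retries = SKETCH_DIV_ROUND_UP(retries, 1000);
		usecs *= 1000;
	}

	return (unsigned long)retries * usecs;
}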
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- loaded--;
- loaded_port[p_hwfn->port_id]--;
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
- loaded);
- }
- return ECORE_SUCCESS;
- }
-#endif
-
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
return ECORE_SUCCESS;
}
-#ifndef ASIC_ONLY
-static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
- u32 *p_load_code)
-{
- static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
-
- if (!loaded)
- load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
- else if (!loaded_port[p_hwfn->port_id])
- load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
- else
- load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
-
- /* On CMT, always tell that it's engine */
- if (ECORE_IS_CMT(p_hwfn->p_dev))
- load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
-
- *p_load_code = load_phase;
- loaded++;
- loaded_port[p_hwfn->port_id]++;
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
- *p_load_code, loaded, p_hwfn->port_id,
- loaded_port[p_hwfn->port_id]);
-}
-#endif
-
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
enum ecore_override_force_load override_force_load)
u8 mfw_drv_role = 0, mfw_force_cmd;
enum _ecore_status_t rc;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
- return ECORE_SUCCESS;
- }
-#endif
-
OSAL_MEM_ZERO(&in_params, sizeof(in_params));
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
in_params.drv_ver_0 = ECORE_VERSION;
u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
u32 path_addr = SECTION_ADDR(mfw_path_offsize,
ECORE_PATH_ID(p_hwfn));
- u32 disabled_vfs[VF_MAX_STATIC / 32];
+ u32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
int i;
+ OSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
+
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Reading Disabled VF information from [offset %08x],"
" path_addr %08x\n",
mfw_path_offsize, path_addr);
- for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) {
disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
path_addr +
OFFSETOF(struct public_path,
struct ecore_ptt *p_ptt,
u32 *vfs_to_ack)
{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_func_offsize,
- MCP_PF_ID(p_hwfn));
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
- int i;
+ u16 i;
- for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
"Acking VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
mb_params.p_data_src = vfs_to_ack;
- mb_params.data_src_size = VF_MAX_STATIC / 8;
+ mb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
&mb_params);
if (rc != ECORE_SUCCESS) {
return ECORE_TIMEOUT;
}
- /* TMP - clear the ACK bits; should be done by MFW */
- for (i = 0; i < (VF_MAX_STATIC / 32); i++)
- ecore_wr(p_hwfn, p_ptt,
- func_addr +
- OFFSETOF(struct public_func, drv_ack_vf_disabled) +
- i * sizeof(u32), 0);
-
return rc;
}
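/* A standalone sketch of the dword-bitmap layout used by the VF disable/ack
 * flows above: one bit per VF packed into 32-bit words, so a bitmap covering
 * "bits" VFs needs (bits + 31) / 32 dwords, and VF "vf" maps to dword
 * vf / 32, bit vf % 32. The driver sizes its arrays with the HSI
 * (EXT_)VF_BITMAP_SIZE_* constants rather than this helper.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_BITMAP_DWORDS(bits)	(((bits) + 31) / 32)

static inline bool sketch_vf_bit_is_set(const uint32_t *bitmap, uint16_t vf)
{
	return (bitmap[vf / 32] >> (vf % 32)) & 0x1;
}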
u32 cmd;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (b_up)
+ OSAL_LINK_UPDATE(p_hwfn);
return ECORE_SUCCESS;
+ }
#endif
/* Set the shmem configuration according to params */
struct mdump_config_stc mdump_config;
enum _ecore_status_t rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get mdump info\n");
+ return ECORE_NOTIMPL;
+ }
+#endif
+
OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
/* update storm FW with negotiation results */
ecore_sp_pf_update_ufp(p_hwfn);
+ /* update stag pcp value */
+ ecore_sp_pf_update_stag(p_hwfn);
+
return ECORE_SUCCESS;
}
u32 global_offsize;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
- return ECORE_SUCCESS;
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get MFW version\n");
+ return ECORE_NOTIMPL;
}
#endif
struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ *p_media_type = MEDIA_UNSPECIFIED;
/* TODO - Add support for VFs */
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get media type\n");
+ return ECORE_NOTIMPL;
+ }
+#endif
DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
- if (!p_ptt) {
- *p_media_type = MEDIA_UNSPECIFIED;
- rc = ECORE_INVAL;
- } else {
- *p_media_type = ecore_rd(p_hwfn, p_ptt,
- p_hwfn->mcp_info->port_addr +
- OFFSETOF(struct public_port,
- media_type));
- }
+ if (!p_ptt)
+ return ECORE_INVAL;
+
+ *p_media_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, media_type));
return ECORE_SUCCESS;
}
u32 flash_size;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
- return ECORE_INVAL;
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn, "Emulation: Can't get flash size\n");
+ return ECORE_NOTIMPL;
}
#endif
struct ecore_ptt *p_ptt,
u8 vf_id, u8 num)
{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
+ DP_INFO(p_hwfn,
+ "Emulation: Avoid sending the %s mailbox command\n",
+ ECORE_IS_BB(p_hwfn->p_dev) ? "CFG_VF_MSIX" :
+ "CFG_PF_VFS_MSIX");
+ return ECORE_SUCCESS;
+ }
+#endif
+
if (ECORE_IS_BB(p_hwfn->p_dev))
return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
else
u32 cmd;
u32 param;
void *p_data_src;
- u8 data_src_size;
void *p_data_dst;
- u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
+ u8 data_src_size;
+ u8 data_dst_size;
+ u32 flags;
+#define ECORE_MB_FLAG_CAN_SLEEP (0x1 << 0)
+#define ECORE_MB_FLAG_AVOID_BLOCK (0x1 << 1)
+#define ECORE_MB_FLAGS_IS_SET(params, flag) \
+ ((params) != OSAL_NULL && ((params)->flags & ECORE_MB_FLAG_##flag))
};
struct ecore_drv_tlv_hdr {
u8 num_uhq_pages_in_ring;
u8 num_queues;
u8 log_page_size;
+ u8 log_page_size_conn;
u8 rqe_log_size;
u8 max_fin_rt;
u8 gl_rq_pi;
p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
- /* enable_stag_pri_change should be set if port is in BD mode or,
- * UFP with Host Control mode or, UFP with DCB over base interface.
+ /* enable_stag_pri_change should be set if the port is in BD mode,
+ * or in UFP with Host Control mode.
*/
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
- if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
- (p_hwfn->p_dcbx_info->results.dcbx_enabled))
+ if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
else
p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+
+ p_ramrod->outer_tag_config.outer_tag.tci |=
+ OSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13));
}
/* Place EQ address in RAMROD */
return rc;
p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
- if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
- (p_hwfn->p_dcbx_info->results.dcbx_enabled))
+ if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
else
p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
if (rc != ECORE_SUCCESS)
return rc;
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ p_ent->ramrod.pf_update.mf_vlan |=
+ OSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13));
+
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
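/* A standalone sketch of the two TCI updates above: in an 802.1Q/802.1ad tag
 * the PCP field occupies bits 15:13 of the TCI, so the 3-bit UFP TC is
 * shifted left by 13 and OR-ed into the outer tag; the driver additionally
 * converts the result to little endian with OSAL_CPU_TO_LE16().
 */
#include <stdint.h>

static inline uint16_t sketch_tci_with_pcp(uint16_t tci, uint8_t tc)
{
	/* Assumes the PCP bits of "tci" are still zero, as they are in the
	 * ramrods being built above.
	 */
	return (uint16_t)(tci | ((uint16_t)(tc & 0x7) << 13));
}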
/***************************************************************************
* HSI access
***************************************************************************/
+
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
+ __le32 *p_spq_base_lo, *p_spq_base_hi;
+ struct regpair *p_consolid_base_addr;
+ u8 *p_flags1, *p_flags9, *p_flags10;
struct core_conn_context *p_cxt;
struct ecore_cxt_info cxt_info;
+ u32 core_conn_context_size;
+ __le16 *p_physical_q0;
u16 physical_q;
enum _ecore_status_t rc;
rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
- if (rc < 0) {
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
p_spq->cid);
return;
}
p_cxt = cxt_info.p_cxt;
+ core_conn_context_size = sizeof(*p_cxt);
+ p_flags1 = &p_cxt->xstorm_ag_context.flags1;
+ p_flags9 = &p_cxt->xstorm_ag_context.flags9;
+ p_flags10 = &p_cxt->xstorm_ag_context.flags10;
+ p_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0;
+ p_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo;
+ p_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi;
+ p_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr;
/* @@@TBD we zero the context until we have ilt_reset implemented. */
- OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
- SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
- SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
- /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- * E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
- */
- SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
- }
+ OSAL_MEM_ZERO(p_cxt, core_conn_context_size);
+
+ SET_FIELD(*p_flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(*p_flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ SET_FIELD(*p_flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* CDU validation - FIXME currently disabled */
/* QM physical queue */
physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
- p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
+ *p_physical_q0 = OSAL_CPU_TO_LE16(physical_q);
- p_cxt->xstorm_st_context.spq_base_lo =
- DMA_LO_LE(p_spq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.spq_base_hi =
- DMA_HI_LE(p_spq->chain.p_phys_addr);
+ *p_spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr);
+ *p_spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr);
- DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+ DMA_REGPAIR_LE(*p_consolid_base_addr,
p_hwfn->p_consq->chain.p_phys_addr);
}
*
* @brief ecore_iov_config_perm_table - configure the permission
* zone table.
- * In E4, queue zone permission table size is 320x9. There
+ * The queue zone permission table size is 320x9. There
* are 320 VF queues for single engine device (256 for dual
* engine device), and each entry has the following format:
* {Valid, VF[7:0]}
for (qid = 0; qid < num_rx_queues; qid++) {
p_block = ecore_get_igu_free_sb(p_hwfn, false);
+ if (!p_block)
+ continue;
+
vf->igu_sbs[qid] = p_block->igu_sb_id;
p_block->status &= ~ECORE_IGU_STATUS_FREE;
SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
p_bulletin->capability_speed = p_caps->speed_capabilities;
}
+#ifndef ASIC_ONLY
+static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ /* Increase the maximum number of DORQ FIFO entries used by child VFs */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);
+}
+#endif
+
enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
&link_params, &link_state, &link_caps);
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- if (rc == ECORE_SUCCESS) {
- vf->b_init = true;
- p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
+ vf->b_init = true;
+#ifndef REMOVE_DBG
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
(1ULL << (vf->relative_vf_id % 64));
+#endif
- if (IS_LEAD_HWFN(p_hwfn))
- p_hwfn->p_dev->p_iov_info->num_vfs++;
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs++;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);
+#endif
+
+ return ECORE_SUCCESS;
}
- return rc;
+#ifndef ASIC_ONLY
+static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ u32 sriov_dis = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);
+
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,
+ sriov_dis);
}
+}
+#endif
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
p_hwfn->p_dev->p_iov_info->num_vfs--;
}
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);
+#endif
+
return ECORE_SUCCESS;
}
eng_vf_id = p_vf->abs_vf_id;
- OSAL_MEMSET(¶ms, 0, sizeof(struct dmae_params));
+ OSAL_MEMSET(¶ms, 0, sizeof(params));
SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vf_id = eng_vf_id;
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
- pfdev_info->indices_per_sb = MAX_PIS_PER_SB;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
- /* Update the TLV with the response */
+ /* Update the TLV with the response.
+ * The VF Rx producers are located in the vf zone.
+ */
if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
req = &mbx->req_virt->start_rxq;
- p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+
+ p_tlv->offset =
+ PXP_VF_BAR0_START_MSDM_ZONE_B +
OFFSETOF(struct mstorm_vf_zone,
non_trigger.eth_rx_queue_producers) +
sizeof(struct eth_rx_prod_data) * req->rx_qid;
if (p_cid == OSAL_NULL)
goto out;
- /* Legacy VFs have their Producers in a different location, which they
- * calculate on their own and clean the producer prior to this.
+ /* The VF Rx producers are located in the vf zone.
+ * Legacy VFs have their producers in the queue zone, but they
+ * calculate the location on their own and clean them prior to this.
*/
if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
+ req->rx_qid),
0);
rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
return ECORE_SUCCESS;
}
+#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
+
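/* The PBF polling below walks the per-VOQ counters via VOQ(port, tc, ...).
 * This is a standalone sketch of one flat (port, tc) -> VOQ mapping that
 * stays within the MAX_NUM_PORTS * NUM_OF_TCS bound defined above; it is an
 * illustrative assumption only, and the driver relies on the VOQ() helper
 * rather than this formula.
 */
#include <stdint.h>

#define SKETCH_NUM_OF_TCS	8	/* assumed TC count per port */

static inline uint8_t sketch_voq(uint8_t port_id, uint8_t tc_id)
{
	return (uint8_t)(port_id * SKETCH_NUM_OF_TCS + tc_id);
}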
static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
- int i, cnt;
+ u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
+ u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
+ u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;
+ u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
+ u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
+ u8 port_id, tc, tc_id = 0, voq = 0;
+ int cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
- u32 prod;
-
- cons[i] = ecore_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc :
+ PURE_LB_TC;
+ voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
+ cons[voq] = ecore_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
prod = ecore_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
- i * 0x40);
- distance[i] = prod - cons[i];
+ prod_voq0_addr + voq * 0x40);
+ distance[voq] = prod - cons[voq];
+ }
}
/* Wait for consumers to pass the producers */
- i = 0;
+ port_id = 0;
+ tc = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS_E4; i++) {
- u32 tmp;
-
+ for (; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc :
+ PURE_LB_TC;
+ voq = VOQ(port_id, tc_id,
+ max_phys_tcs_per_port);
tmp = ecore_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- if (distance[i] > tmp - cons[i])
+ cons_voq0_addr + voq * 0x40);
+ if (distance[voq] > tmp - cons[voq])
+ break;
+ }
+
+ if (tc == max_phys_tcs_per_port + 1)
+ tc = 0;
+ else
break;
}
- if (i == MAX_NUM_VOQS_E4)
+ if (port_id == max_ports_per_engine)
break;
OSAL_MSLEEP(20);
}
if (cnt == 50) {
- DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
- p_vf->abs_vf_id, i);
+ DP_ERR(p_hwfn,
+ "VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n",
+ p_vf->abs_vf_id, voq, port_id, tc_id);
return ECORE_TIMEOUT;
}
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 ack_vfs[VF_MAX_STATIC / 32];
+ u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
enum _ecore_status_t rc = ECORE_SUCCESS;
u16 i;
- OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+ OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
/* Since BRB <-> PRS interface can't be tested as part of the flr
* polling due to HW limitations, simply sleep a bit. And since
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
- u32 ack_vfs[VF_MAX_STATIC / 32];
+ u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
enum _ecore_status_t rc = ECORE_SUCCESS;
- OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+ OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
/* Wait instead of polling the BRB <-> PRS interface */
OSAL_MSLEEP(100);
u16 i;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
- for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+
+ for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"[%08x,...,%08x]: %08x\n",
i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
if (!vf_info)
return ECORE_INVAL;
- OSAL_MEMSET(¶ms, 0, sizeof(struct dmae_params));
+ OSAL_MEMSET(¶ms, 0, sizeof(params));
SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
params.src_vf_id = vf_info->abs_vf_id;
struct ecore_ptt *p_ptt,
int vfid, int val)
{
- struct ecore_mcp_link_state *p_link;
struct ecore_vf_info *vf;
u8 abs_vp_id = 0;
+ u16 rl_id;
enum _ecore_status_t rc;
vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (rc != ECORE_SUCCESS)
return rc;
- p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
-
- return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
- p_link->speed);
+ rl_id = abs_vp_id; /* The "rl_id" is set to the "vport_id" */
+ return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct ecore_vf_acquire_sw_info vf_sw_info;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
struct vf_pf_resc_request *p_resc;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
p_iov->bulletin.size = resp->bulletin_size;
/* get HW info */
- p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
- p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
+ p_dev->type = resp->pfdev_info.dev_type;
+ p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
DP_INFO(p_hwfn, "Chip details - %s%d\n",
- ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
+ ECORE_IS_BB(p_dev) ? "BB" : "AH",
CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
- p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
+ p_dev->chip_num = pfdev_info->chip_num & 0xffff;
/* Learn of the possibility of CMT */
if (IS_LEAD_HWFN(p_hwfn)) {
if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
DP_INFO(p_hwfn, "100g VF\n");
- p_hwfn->p_dev->num_hwfns = 2;
+ p_dev->num_hwfns = 2;
}
}
return ECORE_NOMEM;
}
-#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
-#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
- (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-
/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
static void
__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0;
- *pp_prod = (u8 OSAL_IOMEM *)
- p_hwfn->regview +
+ *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
MSTORM_QZONE_START(p_hwfn->p_dev) +
(hw_qid) * MSTORM_QZONE_SIZE;
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 10
+#define QEDE_PMD_VERSION_MINOR 11
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
char qede_fw_file[PATH_MAX];
static const char * const QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.37.7.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.40.25.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
fp->rxq->handle = ret_params.p_handle;
- fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
qede_update_rx_prod(qdev, fp->rxq);
eth_dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
txq->doorbell_addr = ret_params.p_doorbell;
txq->handle = ret_params.p_handle;
- txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
+ txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
DB_DEST_XCM);
SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,