diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 3071b46136..e3afc8a3d7 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -196,6 +196,7 @@ static struct ecore_queue_cid *
 _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                         u16 opaque_fid, u32 cid,
                         struct ecore_queue_start_common_params *p_params,
+                        bool b_is_rx,
                         struct ecore_queue_cid_vf_params *p_vf_params)
 {
         struct ecore_queue_cid *p_cid;
@@ -214,6 +215,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
         p_cid->rel.queue_id = p_params->queue_id;
         p_cid->rel.stats_id = p_params->stats_id;
         p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+        p_cid->b_is_rx = b_is_rx;
         p_cid->sb_idx = p_params->sb_idx;
 
         /* Fill-in bits related to VFs' queues if information was provided */
@@ -233,7 +235,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
         }
 
         /* Calculate the engine-absolute indices of the resources.
-         * The would guarantee they're valid later on.
+         * This would guarantee they're valid later on.
          * In some cases [SBs] we already have the right values.
          */
         rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
@@ -287,6 +289,7 @@ fail:
 struct ecore_queue_cid *
 ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                        struct ecore_queue_start_common_params *p_params,
+                       bool b_is_rx,
                        struct ecore_queue_cid_vf_params *p_vf_params)
 {
         struct ecore_queue_cid *p_cid;
@@ -321,7 +324,7 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
         }
 
         p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
-                                        p_params, p_vf_params);
+                                        p_params, b_is_rx, p_vf_params);
         if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                 _ecore_cxt_release_cid(p_hwfn, cid, vfid);
 
@@ -330,9 +333,11 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
 
 static struct ecore_queue_cid *
 ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+                          bool b_is_rx,
                           struct ecore_queue_start_common_params *p_params)
 {
-        return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+        return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+                                      OSAL_NULL);
 }
 
 enum _ecore_status_t
@@ -342,6 +347,7 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
         struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
         struct ecore_spq_entry *p_ent = OSAL_NULL;
         struct ecore_sp_init_data init_data;
+        struct eth_vport_tpa_param *p_tpa;
         u16 rx_mode = 0, tx_err = 0;
         u8 abs_vport_id = 0;
         enum _ecore_status_t rc = ECORE_NOTIMPL;
@@ -366,8 +372,8 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
         p_ramrod->vport_id = abs_vport_id;
 
         p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
-        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
         p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
         p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
         p_ramrod->untagged = p_params->only_untagged;
         p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
@@ -402,22 +408,22 @@
         p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
 
         /* TPA related fields */
-        OSAL_MEMSET(&p_ramrod->tpa_param, 0,
-                    sizeof(struct eth_vport_tpa_param));
-        p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+        p_tpa = &p_ramrod->tpa_param;
+        OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
+        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
         switch (p_params->tpa_mode) {
         case ECORE_TPA_MODE_GRO:
-                p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
-                p_ramrod->tpa_param.tpa_max_size = (u16)-1;
-                p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
-                p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
-                p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
-                p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
-                p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
-                p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
-                p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
-                p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+                p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+                p_tpa->tpa_max_size = (u16)-1;
+                p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
+                p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
+                p_tpa->tpa_ipv4_en_flg = 1;
+                p_tpa->tpa_ipv6_en_flg = 1;
+                p_tpa->tpa_ipv4_tunn_en_flg = 1;
+                p_tpa->tpa_ipv6_tunn_en_flg = 1;
+                p_tpa->tpa_pkt_split_flg = 1;
+                p_tpa->tpa_gro_consistent_flg = 1;
                 break;
         default:
                 break;
@@ -459,6 +465,7 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
                           struct ecore_rss_params *p_rss)
 {
         struct eth_vport_rss_config *p_config;
+        u16 capabilities = 0;
         int i, table_size;
         enum _ecore_status_t rc = ECORE_SUCCESS;
 
@@ -485,26 +492,26 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
 
         p_config->capabilities = 0;
 
-        SET_FIELD(p_config->capabilities,
+        SET_FIELD(capabilities,
                   ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                   !!(p_rss->rss_caps & ECORE_RSS_IPV4));
-        SET_FIELD(p_config->capabilities,
+        SET_FIELD(capabilities,
                   ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                   !!(p_rss->rss_caps & ECORE_RSS_IPV6));
-        SET_FIELD(p_config->capabilities,
+        SET_FIELD(capabilities,
                   ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                   !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
-        SET_FIELD(p_config->capabilities,
+        SET_FIELD(capabilities,
                   ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                   !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
-        SET_FIELD(p_config->capabilities,
+        SET_FIELD(capabilities,
                   ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                   !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
-        SET_FIELD(p_config->capabilities,
+        SET_FIELD(capabilities,
                   ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                   !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
         p_config->tbl_size = p_rss->rss_table_size_log;
-        p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+        p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);
 
         DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                    "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
@@ -636,6 +643,7 @@ ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
                               struct ecore_sge_tpa_params *p_params)
 {
         struct eth_vport_tpa_param *p_tpa;
+        u16 val;
 
         if (!p_params) {
                 p_ramrod->common.update_tpa_param_flg = 0;
@@ -657,9 +665,12 @@ ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
         p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
         p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
         p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
-        p_tpa->tpa_max_size = p_params->tpa_max_size;
-        p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
-        p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+        val = p_params->tpa_max_size;
+        p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
+        val = p_params->tpa_min_size_to_start;
+        p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
+        val = p_params->tpa_min_size_to_cont;
+        p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
 }
 
 static void
@@ -984,7 +995,7 @@ ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
         enum _ecore_status_t rc;
 
         /* Allocate a CID for the queue */
-        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
         if (p_cid == OSAL_NULL)
                 return ECORE_NOMEM;
 
@@ -1200,7 +1211,7 @@ ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
         struct ecore_queue_cid *p_cid;
         enum _ecore_status_t rc;
 
-        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
         if (p_cid == OSAL_NULL)
                 return ECORE_INVAL;
 
@@ -1968,6 +1979,7 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+                bool b_get_port_stats;
 
                 if (IS_PF(p_dev)) {
                         /* The main vport index is relative first */
@@ -1982,8 +1994,9 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
                         continue;
                 }
 
+                b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
                 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
-                                        IS_PF(p_dev) ? true : false);
+                                        b_get_port_stats);
 
 out:
                 if (IS_PF(p_dev) && p_ptt)
@@ -2059,12 +2072,16 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt,
                                struct ecore_arfs_config_params *p_cfg_params)
 {
+        if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
+                return;
+
         if (p_cfg_params->arfs_enable) {
-                ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-                                          p_cfg_params->tcp,
-                                          p_cfg_params->udp,
-                                          p_cfg_params->ipv4,
-                                          p_cfg_params->ipv6);
+                ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                                 p_cfg_params->tcp,
+                                 p_cfg_params->udp,
+                                 p_cfg_params->ipv4,
+                                 p_cfg_params->ipv6,
+                                 GFT_PROFILE_TYPE_4_TUPLE);
                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                            "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
                            p_cfg_params->tcp ? "Enable" : "Disable",
@@ -2072,7 +2089,7 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
                            p_cfg_params->ipv4 ? "Enable" : "Disable",
                            p_cfg_params->ipv6 ? "Enable" : "Disable");
         } else {
-                ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+                ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
         }
         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
                    p_cfg_params->arfs_enable ? "Enable" : "Disable");
@@ -2123,9 +2140,17 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
 
         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
         p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
-        p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+        p_ramrod->action_icid_valid = 0;
+        p_ramrod->action_icid = 0;
+
+        p_ramrod->rx_qid_valid = 1;
+        p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+        p_ramrod->flow_id_valid = 0;
+        p_ramrod->flow_id = 0;
+
         p_ramrod->vport_id = abs_vport_id;
-        p_ramrod->filter_type = RFS_FILTER_TYPE;
         p_ramrod->filter_action = b_is_add ?
                                   GFT_ADD_FILTER : GFT_DELETE_FILTER;
 
@@ -2137,3 +2162,108 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
 
         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
+
+int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           struct ecore_queue_cid *p_cid,
+                           u16 *p_rx_coal)
+{
+        u32 coalesce, address, is_valid;
+        struct cau_sb_entry sb_entry;
+        u8 timer_res;
+        enum _ecore_status_t rc;
+
+        rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+                                 p_cid->sb_igu_id * sizeof(u64),
+                                 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+        if (rc != ECORE_SUCCESS) {
+                DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+                return rc;
+        }
+
+        timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
+
+        address = BAR0_MAP_REG_USDM_RAM +
+                  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+        coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+        is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+        if (!is_valid)
+                return ECORE_INVAL;
+
+        coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+        *p_rx_coal = (u16)(coalesce << timer_res);
+
+        return ECORE_SUCCESS;
+}
+
+int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           struct ecore_queue_cid *p_cid,
+                           u16 *p_tx_coal)
+{
+        u32 coalesce, address, is_valid;
+        struct cau_sb_entry sb_entry;
+        u8 timer_res;
+        enum _ecore_status_t rc;
+
+        rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+                                 p_cid->sb_igu_id * sizeof(u64),
+                                 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+        if (rc != ECORE_SUCCESS) {
+                DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+                return rc;
+        }
+
+        timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
+
+        address = BAR0_MAP_REG_XSDM_RAM +
+                  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+        coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+        is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+        if (!is_valid)
+                return ECORE_INVAL;
+
+        coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+        *p_tx_coal = (u16)(coalesce << timer_res);
+
+        return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
+                         void *handle)
+{
+        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
+        enum _ecore_status_t rc = ECORE_SUCCESS;
+        struct ecore_ptt *p_ptt;
+
+        if (IS_VF(p_hwfn->p_dev)) {
+                rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
+                if (rc != ECORE_SUCCESS)
+                        DP_NOTICE(p_hwfn, false,
+                                  "Unable to read queue coalescing\n");
+
+                return rc;
+        }
+
+        p_ptt = ecore_ptt_acquire(p_hwfn);
+        if (!p_ptt)
+                return ECORE_AGAIN;
+
+        if (p_cid->b_is_rx) {
+                rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+                if (rc != ECORE_SUCCESS)
+                        goto out;
+        } else {
+                rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+                if (rc != ECORE_SUCCESS)
+                        goto out;
+        }
+
+out:
+        ecore_ptt_release(p_hwfn, p_ptt);
+
+        return rc;
+}
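
Two of the hunks above share one theme: the firmware structures are little-endian, so multi-byte fields should be built in CPU byte order and converted exactly once. In ecore_sp_vport_update_rss() the SET_FIELD() bits now accumulate in a local u16 "capabilities" before the single OSAL_CPU_TO_LE16() store, and ecore_sp_vport_update_sge_tpa() gains the same one-shot conversion for the TPA size fields. Below is a minimal standalone sketch of that pattern; the macros and the build_rss_caps() helper are illustrative stand-ins, not the driver's definitions.

/*
 * Sketch (not the driver's code) of the byte-order pattern the RSS hunk
 * adopts: set bit-fields on a CPU-order local, convert to little-endian
 * exactly once when writing the firmware structure.
 */
#include <stdint.h>

#define CAPS_IPV4_MASK   0x1
#define CAPS_IPV4_SHIFT  0
#define CAPS_IPV6_MASK   0x1
#define CAPS_IPV6_SHIFT  1

/* Simplified stand-in for the driver's SET_FIELD() macro. */
#define SET_FIELD(value, name, flag) \
        ((value) |= (uint16_t)(((flag) & name##_MASK) << name##_SHIFT))

static inline uint16_t cpu_to_le16(uint16_t v)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        return (uint16_t)((v >> 8) | (v << 8));
#else
        return v;
#endif
}

/* Build the capabilities word the way the patched code does: bits are
 * set on a CPU-order local and the conversion happens once, at the end.
 * Mixing CPU-order bit manipulation with an in-place conversion of the
 * same field, as the old code did, is easy to get wrong on big-endian
 * hosts and leaves the field's byte order ambiguous mid-function.
 */
uint16_t build_rss_caps(int ipv4_en, int ipv6_en)
{
        uint16_t caps = 0;

        SET_FIELD(caps, CAPS_IPV4, !!ipv4_en);
        SET_FIELD(caps, CAPS_IPV6, !!ipv6_en);

        return cpu_to_le16(caps);
}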
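
The b_is_rx flag threaded through _ecore_eth_queue_to_cid() and ecore_eth_queue_to_cid_pf() exists so that ecore_get_queue_coalesce(), which only receives an opaque handle, can tell Rx queues from Tx queues and read the matching coalescing registers. A small sketch of that tagged-handle dispatch follows; the types and the stubbed readers are illustrative, not the driver's definitions.

/*
 * Tagged-handle dispatch: the queue handle records its direction when
 * the queue is created, so a direction-agnostic query can branch on the
 * tag instead of asking the caller to remember the queue type.
 */
#include <stdbool.h>
#include <stdint.h>

struct queue_handle {
        uint16_t queue_id;
        bool b_is_rx;   /* set once at queue-start time, as in the diff */
};

/* Stub readers standing in for ecore_get_rxq_coalesce() and
 * ecore_get_txq_coalesce(); the real code reads the USTORM/XSTORM
 * queue zones through the PTT window.
 */
static int read_rx_coal(const struct queue_handle *q, uint16_t *coal)
{
        (void)q;
        *coal = 0;
        return 0;
}

static int read_tx_coal(const struct queue_handle *q, uint16_t *coal)
{
        (void)q;
        *coal = 0;
        return 0;
}

/* Direction-agnostic query, mirroring ecore_get_queue_coalesce(). */
int get_queue_coalesce(const struct queue_handle *q, uint16_t *coal)
{
        return q->b_is_rx ? read_rx_coal(q, coal) : read_tx_coal(q, coal);
}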
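
Finally, the readback arithmetic in ecore_get_rxq_coalesce()/ecore_get_txq_coalesce(): the CAU status-block entry stores a coarse "timeset" that the configuration path derived by right-shifting the requested coalescing interval by the timer resolution, so the getters reverse it with "coalesce << timer_res" after checking the VALID bit. A worked sketch of just that arithmetic, with illustrative values:

/*
 * Configuration stores timeset = interval >> timer_res; the getters
 * above recover the interval as timeset << timer_res once the VALID
 * bit checks out. The recovered value is quantized to the timer
 * resolution, so it may differ from the originally requested interval.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t coalesce_from_timeset(uint8_t timeset, uint8_t timer_res)
{
        return (uint16_t)(timeset << timer_res);
}

int main(void)
{
        /* A timeset of 24 at timer resolution 1 means the queue was
         * configured for 24 << 1 = 48 time units (treated as
         * microseconds by the ecore callers).
         */
        printf("coalesce = %u\n", coalesce_from_timeset(24, 1));
        return 0;
}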