Change the RSS configuration to use L2 queue handles instead of queue IDs.
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
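
For context (not part of the patch): after this change each entry of the RSS
indirection table carries the opaque L2 queue handle returned when the Rx queue
was started, rather than a u16 queue ID. A minimal sketch of how a caller could
fill the table under the new scheme follows; the helper name setup_rss_ind_table
and the rxq_handles[] array are illustrative assumptions, only
struct ecore_rss_params and ECORE_RSS_IND_TABLE_SIZE come from the patch itself.

/* Illustrative sketch, not part of the patch: spread the active Rx queue
 * handles (as returned at queue-start time) round-robin across the
 * indirection table. Each entry is now an L2 handle, not a queue ID.
 */
static void setup_rss_ind_table(struct ecore_rss_params *rss,
				void *rxq_handles[], int num_rxqs)
{
	int i;

	rss->update_rss_ind_table = 1;
	rss->rss_table_size_log = 7;	/* 2^7 == ECORE_RSS_IND_TABLE_SIZE */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		rss->rss_ind_table[i] = rxq_handles[i % num_rxqs];
}
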
p_cid->cid = cid;
p_cid->vf_qid = vf_qid;
p_cid->rel = *p_params;
+ p_cid->p_owner = p_hwfn;
/* Don't try calculating the absolute indices for VFs */
if (IS_VF(p_hwfn->p_dev)) {
struct vport_update_ramrod_data *p_ramrod,
struct ecore_rss_params *p_rss)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct eth_vport_rss_config *p_config;
- u16 abs_l2_queue = 0;
- int i;
+ int i, table_size;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!p_rss) {
p_ramrod->common.update_rss_flg = 0;
p_config->capabilities,
p_config->update_rss_ind_table, p_config->update_rss_key);
- for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
- rc = ecore_fw_l2_queue(p_hwfn,
- p_rss->rss_ind_table[i],
- &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
+ table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];
- p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
- i, p_config->indirection_table[i]);
+ if (!p_queue)
+ return ECORE_INVAL;
+
+ p_config->indirection_table[i] =
+ OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
}
for (i = 0; i < 10; i++)
/* Legacy VFs might have Rx producer located elsewhere */
bool b_legacy_vf;
+
+ struct ecore_hwfn *p_owner;
};
void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
- u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+
+ /* Indirection table consists of Rx queue handles */
+ void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
u32 rss_key[ECORE_RSS_KEY_SIZE];
};
struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_rss_params *p_rss,
- struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 i, q_idx, max_q_idx;
+ bool b_reject = false;
u16 table_size;
+ u16 i, q_idx;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
p_rss->rss_eng_id = vf->relative_vf_id + 1;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
sizeof(p_rss->rss_key));
table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
- max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
for (i = 0; i < table_size; i++) {
- u16 index = vf->vf_queues[0].fw_rx_qid;
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d,"
- " rxq is out of range\n",
- i, q_idx);
- else if (!vf->vf_queues[q_idx].p_rx_cid)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- else
- index = vf->vf_queues[q_idx].fw_rx_qid;
- p_rss->rss_ind_table[i] = index;
+ if (!vf->vf_queues[q_idx].p_rx_cid) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to inactive queue %08x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
}
p_data->rss_params = p_rss;
+out:
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
static void
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_rss_params *p_rss_params = OSAL_NULL;
struct ecore_sp_vport_update_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct ecore_sge_tpa_params sge_tpa_params;
u16 tlvs_mask = 0, tlvs_accepted = 0;
- struct ecore_rss_params rss_params;
u8 status = PFVF_STATUS_SUCCESS;
u16 length;
enum _ecore_status_t rc;
goto out;
}
+ p_rss_params = OSAL_VALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+ if (p_rss_params == OSAL_NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
OSAL_MEMSET(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
- mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; in that case,
+ * they can update the mask without updating the accepted bitmap [so
+ * that the PF can indicate to the VF that it has rejected the request].
+ */
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
/* Just log a message if there is no single extended tlv in buffer.
* When all features of vport update ramrod would be requested by VF
* as extended TLVs in buffer then an error can be returned in response
* if there is no extended TLV present in buffer.
*/
- tlvs_accepted = tlvs_mask;
-
if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
&params, &tlvs_accepted) !=
ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
tlvs_mask, tlvs_accepted);
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
if (p_params->rss_params) {
struct ecore_rss_params *rss_params = p_params->rss_params;
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
p_rss_tlv->rss_enable = rss_params->rss_enable;
p_rss_tlv->rss_caps = rss_params->rss_caps;
p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
- OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
- sizeof(rss_params->rss_ind_table));
+
+ table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
+
OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
sizeof(rss_params->rss_key));
}
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_sp_vport_update_params vport_update_params;
struct ecore_rss_params rss_params;
- struct ecore_rss_params params;
struct ecore_hwfn *p_hwfn;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
uint8_t len = rss_conf->rss_key_len;
+ uint8_t idx;
uint8_t i;
int rc;
/* tbl_size has to be set with capabilities */
rss_params.rss_table_size_log = 7;
vport_update_params.vport_id = 0;
+ /* Pass the L2 queue handles instead of queue IDs */
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ idx = qdev->rss_ind_table[i];
+ rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+ }
vport_update_params.rss_params = &rss_params;
for_each_hwfn(edev, i) {
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
entry = reta_conf[idx].reta[shift];
- params.rss_ind_table[i] = entry;
+ /* Pass rxq handles to ecore */
+ params.rss_ind_table[i] =
+ qdev->fp_array[entry].rxq->handle;
+ /* Update the local copy for RETA query command */
+ qdev->rss_ind_table[i] = entry;
}
}
/* Fix up RETA for CMT mode device */
if (edev->num_hwfns > 1)
qdev->rss_enable = qed_update_rss_parm_cmt(edev,
- &params.rss_ind_table[0]);
+ params.rss_ind_table[0]);
params.update_rss_ind_table = 1;
params.rss_table_size_log = 7;
params.update_rss_config = 1;
}
}
- /* Update the local copy for RETA query command */
- memcpy(qdev->rss_ind_table, params.rss_ind_table,
- sizeof(params.rss_ind_table));
-
return 0;
}