/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_CLOSE",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_COALESCE_UPDATE",
	"CHANNEL_TLV_QID",
	"CHANNEL_TLV_COALESCE_READ",
	"CHANNEL_TLV_MAX"
};
static u8 ecore_vf_calculate_legacy(struct ecore_hwfn *p_hwfn,
				    struct ecore_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= ECORE_QCID_LEGACY_VF_CID;

	return legacy;
}
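
/* Post a VF_START ramrod on the PF's slowpath queue. The VF's fastpath
 * HSI minor version is negotiated here: anything newer than the PF
 * supports [other than the legacy no-pkt-len/tunn value] is clamped
 * down to the PF's own minor version.
 */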
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 fp_minor;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   " PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
			      struct ecore_vf_info *p_vf,
			      struct ecore_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid &&
		    !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return OSAL_NULL;
}
enum ecore_iov_validate_q_mode {
	ECORE_IOV_VALIDATE_Q_NA,
	ECORE_IOV_VALIDATE_Q_ENABLE,
	ECORE_IOV_VALIDATE_Q_DISABLE,
};

static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
					  struct ecore_vf_info *p_vf,
					  u16 qid,
					  enum ecore_iov_validate_q_mode mode,
					  bool b_is_tx)
{
	int i;

	if (mode == ECORE_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct ecore_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (p_qcid->p_cid == OSAL_NULL)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		/* Found. It's enabled. */
		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
}
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
					     mode, false);
}

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
					     mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

/* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
					  struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  false))
			return true;

	return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
					  struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  true))
			return true;

	return false;
}
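
/* Copy the PF's current bulletin image to the VF's memory. The version
 * is bumped and a CRC is computed over everything past the crc field,
 * so the VF can detect both new and partially-updated snapshots.
 */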
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}
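
/* Read the SR-IOV extended capability [ctrl, total/initial VFs, offset,
 * stride, VF device id, page size] from PCI config space into
 * p_dev->p_iov_info.
 */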
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs are already set to non-zero value."
			   " Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->num_vfs,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d"
			  " setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return ECORE_SUCCESS;
}
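
/* Carve the pre-allocated mailbox request/reply buffers and bulletin
 * board into per-VF slots, and set the static per-VF identities
 * [relative/abs/concrete/opaque fids].
 */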
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "ecore_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
	    num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}

enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return ecore_iov_allocate_vfdb(p_hwfn);
}

void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when its available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}

	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		return ECORE_SUCCESS;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */
	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}
static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
}

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}

static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}
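
/* Note the pretend/unpretend pattern used below: ecore_fid_pretend()
 * makes subsequent register accesses through this PTT execute on behalf
 * of the VF's function, so the PF must always pretend back to its own
 * concrete fid when done.
 */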
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}

static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	/* It's possible VF was previously considered malicious */
	vf->b_malicious = false;

	rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
				      vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}
/**
 * @brief ecore_iov_config_perm_table - configure the permission zone table.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
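
/* Allocate free IGU status blocks for the VF, program the IGU mapping
 * lines [function number, vector, valid] and the matching CAU entries
 * via DMAE. Returns the number of SBs actually allocated, which may be
 * less than requested if the free IOV pool is short.
 */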
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues =
		    (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = ecore_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~ECORE_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		ecore_wr(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY +
			 sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
					p_hwfn->rel_pf_id,
					vf->abs_vf_id, 1);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    p_block->igu_sb_id * sizeof(u64), 2, 0);
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}
/**
 * @brief The function invalidates all the VF entries,
 *        technically this isn't required, but added for
 *        cleanliness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
{
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	u16 qid, num_irqs;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return ECORE_INVAL;
	}

	/* Perform sanity checking on the requested vport/rss */
	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
			  p_params->rel_vf_id, p_params->vport_id);
		return ECORE_INVAL;
	}

	if ((p_params->num_queues > 1) &&
	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
			  p_params->rel_vf_id, p_params->rss_eng_id);
		return ECORE_INVAL;
	}

	/* TODO - remove this once we get confidence of change */
	if (!p_params->vport_id) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	vf->vport_id = p_params->vport_id;
	vf->rss_eng_id = p_params->rss_eng_id;

	/* Since it's possible to relocate SBs, it's a bit difficult to check
	 * things here. Simply check whether the index falls in the range
	 * belonging to the PF.
	 */
	for (i = 0; i < p_params->num_queues; i++) {
		qid = p_params->req_rx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_irqs);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin.
	 */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(link_caps));
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
		vf->b_init = true;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
			(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
	}

	return rc;
}
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
			~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

	return ECORE_SUCCESS;
}
static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn,
			   ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn,
			   ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn,
			   ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn,
			   ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the locking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
		    u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}
		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
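
/* DMAE the prepared reply into the VF's mailbox. The body is copied
 * first and the first 8 bytes [which hold the header with the status]
 * last, so the VF never observes a status before the rest of the reply
 * is in place; the USDM "channel ready" cell is then set to wake it.
 */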
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
				    u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
				  enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
				     ecore_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}

struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      u16 relative_vf_id,
			      bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = OSAL_NULL;
		}
	}

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
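
/* Fill the resc portion of the ACQUIRE response and validate it against
 * the VF's request. Responding with fewer resources than requested is
 * legal, but some legacy Windows VFs can't handle it, so those get a
 * SUCCESS status even when short [see below].
 */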
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	p_resp->num_cids =
	    OSAL_MIN_T(u8, p_req->num_cids,
		       p_hwfn->pf_params.eth_pf_params.num_vf_cons);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
					   struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	 * versatility here.
	 */
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is"
				" incompatible with loaded FW's fastpath"
				" HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->p_dev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support"
			" 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
	    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->p_dev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this information.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}
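
/* Toggle FW anti-spoofing for the VF's vport via a vport-update ramrod;
 * no-op if the requested value is already configured.
 */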
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  " to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/*TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static enum _ecore_status_t
ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf,
				 u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)
		return ECORE_INVAL;

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
		    ECORE_FILTER_FLUSH;

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
		    1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct ecore_queue_cid *p_cid = OSAL_NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
							      p_queue);
			if (p_cid == OSAL_NULL)
				continue;

			rc = ecore_sp_eth_rx_queues_update(p_hwfn,
							   &p_cid,
							   1, 0, 1,
							   ECORE_SPQ_MODE_EBLOCK,
							   OSAL_NULL);
			if (rc) {
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"
					  " for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_start_params params = { 0 };
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	enum _ecore_status_t rc;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
				      start->sb_addr[sb_id],
				      vf->igu_sbs[sb_id],
				      vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
		params.tx_switching = false;
	}
#endif

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
					  vf->vport_id, vf->opaque_fid);
		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
2072 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2073 struct ecore_ptt *p_ptt,
2074 struct ecore_vf_info *vf)
2076 u8 status = PFVF_STATUS_SUCCESS;
2077 enum _ecore_status_t rc;
2079 vf->vport_instance--;
2080 vf->spoof_chk = false;
2082 if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
2083 (ecore_iov_validate_active_txq(p_hwfn, vf))) {
2084 vf->b_malicious = true;
2085 DP_NOTICE(p_hwfn, false,
2086 "VF [%02x] - considered malicious;"
2087 " Unable to stop RX/TX queuess\n",
2091 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2092 if (rc != ECORE_SUCCESS) {
2094 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2095 status = PFVF_STATUS_FAILURE;
2098 /* Forget the configuration on the vport */
2099 vf->configured_features = 0;
2100 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2102 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2103 sizeof(struct pfvf_def_resp_tlv), status);
2106 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2107 struct ecore_ptt *p_ptt,
2108 struct ecore_vf_info *vf,
2109 u8 status, bool b_legacy)
2111 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2112 struct pfvf_start_queue_resp_tlv *p_tlv;
2113 struct vfpf_start_rxq_tlv *req;
2116 mbx->offset = (u8 *)mbx->reply_virt;
2118 /* Taking a bigger struct instead of adding a TLV to list was a
2119 * mistake, but one which we're now stuck with, as some older
2120 * clients assume the size of the previous response.
2123 length = sizeof(*p_tlv);
2125 length = sizeof(struct pfvf_def_resp_tlv);
2127 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2129 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2130 sizeof(struct channel_list_end_tlv));
2132 /* Update the TLV with the response */
2133 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2134 req = &mbx->req_virt->start_rxq;
2135 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2136 OFFSETOF(struct mstorm_vf_zone,
2137 non_trigger.eth_rx_queue_producers) +
2138 sizeof(struct eth_rx_prod_data) * req->rx_qid;
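/* A worked sketch of the offset computation above, using a
 * hypothetical rx_qid of 3: the producers form a per-queue array in
 * the Mstorm zone-B window of BAR0, so the VF's producer would sit at
 *
 *   PXP_VF_BAR0_START_MSDM_ZONE_B
 *   + OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_rx_queue_producers)
 *   + 3 * sizeof(struct eth_rx_prod_data)
 *
 * i.e., the VF updates its BD/CQE producers by writing at this offset.
 */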
2141 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2144 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2145 struct ecore_vf_info *p_vf, bool b_is_tx)
2147 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2148 struct vfpf_qid_tlv *p_qid_tlv;
2150 /* Search for the qid TLV if the VF indicated it's going to provide it */
2151 if (!(p_vf->acquire.vfdev_info.capabilities &
2152 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2154 return ECORE_IOV_LEGACY_QID_TX;
2156 return ECORE_IOV_LEGACY_QID_RX;
2159 p_qid_tlv = (struct vfpf_qid_tlv *)
2160 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2162 if (p_qid_tlv == OSAL_NULL) {
2163 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2164 "VF[%2x]: Failed to provide qid\n",
2165 p_vf->relative_vf_id);
2167 return ECORE_IOV_QID_INVALID;
2170 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2171 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2172 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2173 p_vf->relative_vf_id, p_qid_tlv->qid);
2174 return ECORE_IOV_QID_INVALID;
2177 return p_qid_tlv->qid;
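/* For context, a hedged sketch of the VF side this helper pairs with.
 * A QIDS-capable VF is assumed to append a CHANNEL_TLV_QID after the
 * command TLV, roughly:
 *
 *   struct vfpf_qid_tlv *p_qid;
 *
 *   p_qid = ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_QID,
 *                         sizeof(*p_qid));
 *   p_qid->qid = qid_usage_idx;    (must be < MAX_QUEUES_PER_QZONE)
 *
 * The real VF-side code lives in ecore_vf.c; the snippet only shows
 * why the search above can rely on finding CHANNEL_TLV_QID.
 */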
2180 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2181 struct ecore_ptt *p_ptt,
2182 struct ecore_vf_info *vf)
2184 struct ecore_queue_start_common_params params;
2185 struct ecore_queue_cid_vf_params vf_params;
2186 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2187 u8 status = PFVF_STATUS_NO_RESOURCE;
2188 u8 qid_usage_idx, vf_legacy = 0;
2189 struct ecore_vf_queue *p_queue;
2190 struct vfpf_start_rxq_tlv *req;
2191 struct ecore_queue_cid *p_cid;
2192 struct ecore_sb_info sb_dummy;
2193 enum _ecore_status_t rc;
2195 req = &mbx->req_virt->start_rxq;
2197 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2198 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2199 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2202 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2203 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2206 p_queue = &vf->vf_queues[req->rx_qid];
2207 if (p_queue->cids[qid_usage_idx].p_cid)
2210 vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
2212 /* Acquire a new queue-cid */
2213 OSAL_MEMSET(&params, 0, sizeof(params));
2214 params.queue_id = (u8)p_queue->fw_rx_qid;
2215 params.vport_id = vf->vport_id;
2216 params.stats_id = vf->abs_vf_id + 0x10;
2218 /* Since IGU index is passed via sb_info, construct a dummy one */
2219 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2220 sb_dummy.igu_sb_id = req->hw_sb;
2221 params.p_sb = &sb_dummy;
2222 params.sb_idx = req->sb_index;
2224 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2225 vf_params.vfid = vf->relative_vf_id;
2226 vf_params.vf_qid = (u8)req->rx_qid;
2227 vf_params.vf_legacy = vf_legacy;
2228 vf_params.qid_usage_idx = qid_usage_idx;
2230 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2231 &params, &vf_params);
2232 if (p_cid == OSAL_NULL)
2235 /* Legacy VFs have their Producers in a different location, which they
2236 * calculate on their own and clean the producer prior to this.
2238 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2240 GTT_BAR0_MAP_REG_MSDM_RAM +
2241 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2244 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2249 if (rc != ECORE_SUCCESS) {
2250 status = PFVF_STATUS_FAILURE;
2251 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2253 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2254 p_queue->cids[qid_usage_idx].b_is_tx = false;
2255 status = PFVF_STATUS_SUCCESS;
2256 vf->num_active_rxqs++;
2260 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2262 ECORE_QCID_LEGACY_VF_RX_PROD));
2266 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2267 struct ecore_tunnel_info *p_tun,
2268 u16 tunn_feature_mask)
2270 p_resp->tunn_feature_mask = tunn_feature_mask;
2271 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2272 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2273 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2274 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2275 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2276 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2277 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2278 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2279 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2280 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2281 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2282 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2286 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2287 struct ecore_tunn_update_type *p_tun,
2288 enum ecore_tunn_mode mask, u8 tun_cls)
2290 if (p_req->tun_mode_update_mask & (1 << mask)) {
2291 p_tun->b_update_mode = true;
2293 if (p_req->tunn_mode & (1 << mask))
2294 p_tun->b_mode_enabled = true;
2297 p_tun->tun_cls = tun_cls;
2301 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2302 struct ecore_tunn_update_type *p_tun,
2303 struct ecore_tunn_update_udp_port *p_port,
2304 enum ecore_tunn_mode mask,
2305 u8 tun_cls, u8 update_port, u16 port)
2308 p_port->b_update_port = true;
2309 p_port->port = port;
2312 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2316 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2318 bool b_update_requested = false;
2320 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2321 p_req->update_geneve_port || p_req->update_vxlan_port)
2322 b_update_requested = true;
2324 return b_update_requested;
2327 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2328 struct ecore_ptt *p_ptt,
2329 struct ecore_vf_info *p_vf)
2331 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2332 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2333 struct pfvf_update_tunn_param_tlv *p_resp;
2334 struct vfpf_update_tunn_param_tlv *p_req;
2335 enum _ecore_status_t rc = ECORE_SUCCESS;
2336 u8 status = PFVF_STATUS_SUCCESS;
2337 bool b_update_required = false;
2338 struct ecore_tunnel_info tunn;
2339 u16 tunn_feature_mask = 0;
2342 mbx->offset = (u8 *)mbx->reply_virt;
2344 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2345 p_req = &mbx->req_virt->tunn_param_update;
2347 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2348 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2349 "No tunnel update requested by VF\n");
2350 status = PFVF_STATUS_FAILURE;
2354 tunn.b_update_rx_cls = p_req->update_tun_cls;
2355 tunn.b_update_tx_cls = p_req->update_tun_cls;
2357 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2358 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2359 p_req->update_vxlan_port,
2361 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2362 ECORE_MODE_L2GENEVE_TUNN,
2363 p_req->l2geneve_clss,
2364 p_req->update_geneve_port,
2365 p_req->geneve_port);
2366 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2367 ECORE_MODE_IPGENEVE_TUNN,
2368 p_req->ipgeneve_clss);
2369 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2370 ECORE_MODE_L2GRE_TUNN,
2372 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2373 ECORE_MODE_IPGRE_TUNN,
2376 /* If PF modifies VF's req then it should
2377 * still return an error in case of partial configuration
2378 * or modified configuration as opposed to requested one.
2380 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2381 &b_update_required, &tunn);
2383 if (rc != ECORE_SUCCESS)
2384 status = PFVF_STATUS_FAILURE;
2386 /* Check whether the ECORE client wants to update anything */
2387 if (b_update_required) {
2390 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2391 ECORE_SPQ_MODE_EBLOCK,
2393 if (rc != ECORE_SUCCESS)
2394 status = PFVF_STATUS_FAILURE;
2396 geneve_port = p_tun->geneve_port.port;
2397 ecore_for_each_vf(p_hwfn, i) {
2398 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2399 p_tun->vxlan_port.port,
2405 p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
2406 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2408 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2409 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2410 sizeof(struct channel_list_end_tlv));
2412 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2415 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2416 struct ecore_ptt *p_ptt,
2417 struct ecore_vf_info *p_vf,
2421 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2422 struct pfvf_start_queue_resp_tlv *p_tlv;
2423 bool b_legacy = false;
2426 mbx->offset = (u8 *)mbx->reply_virt;
2428 /* Taking a bigger struct instead of adding a TLV to list was a
2429 * mistake, but one which we're now stuck with, as some older
2430 * clients assume the size of the previous response.
2432 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2433 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2437 length = sizeof(*p_tlv);
2439 length = sizeof(struct pfvf_def_resp_tlv);
2441 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2443 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2444 sizeof(struct channel_list_end_tlv));
2446 /* Update the TLV with the response */
2447 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2448 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
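/* A brief note on the non-legacy reply above: p_tlv->offset is the
 * doorbell-bar offset at which the VF should ring its Tx doorbell;
 * DB_ADDR_VF() derives it from the queue's CID together with the
 * DQ_DEMS_LEGACY selector. Legacy clients [pre pkt-len/tunn HSI]
 * compute the doorbell address on their own, hence the shorter
 * pfvf_def_resp_tlv-sized response.
 */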
2450 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2453 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2454 struct ecore_ptt *p_ptt,
2455 struct ecore_vf_info *vf)
2457 struct ecore_queue_start_common_params params;
2458 struct ecore_queue_cid_vf_params vf_params;
2459 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2460 u8 status = PFVF_STATUS_NO_RESOURCE;
2461 struct ecore_vf_queue *p_queue;
2462 struct vfpf_start_txq_tlv *req;
2463 struct ecore_queue_cid *p_cid;
2464 struct ecore_sb_info sb_dummy;
2465 u8 qid_usage_idx, vf_legacy;
2467 enum _ecore_status_t rc;
2470 OSAL_MEMSET(&params, 0, sizeof(params));
2471 req = &mbx->req_virt->start_txq;
2473 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2474 ECORE_IOV_VALIDATE_Q_NA) ||
2475 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2478 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2479 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2482 p_queue = &vf->vf_queues[req->tx_qid];
2483 if (p_queue->cids[qid_usage_idx].p_cid)
2486 vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
2488 /* Acquire a new queue-cid */
2489 params.queue_id = p_queue->fw_tx_qid;
2490 params.vport_id = vf->vport_id;
2491 params.stats_id = vf->abs_vf_id + 0x10;
2493 /* Since IGU index is passed via sb_info, construct a dummy one */
2494 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2495 sb_dummy.igu_sb_id = req->hw_sb;
2496 params.p_sb = &sb_dummy;
2497 params.sb_idx = req->sb_index;
2499 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2500 vf_params.vfid = vf->relative_vf_id;
2501 vf_params.vf_qid = (u8)req->tx_qid;
2502 vf_params.vf_legacy = vf_legacy;
2503 vf_params.qid_usage_idx = qid_usage_idx;
2505 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2506 &params, &vf_params);
2507 if (p_cid == OSAL_NULL)
2510 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2511 vf->relative_vf_id);
2512 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2513 req->pbl_addr, req->pbl_size, pq);
2514 if (rc != ECORE_SUCCESS) {
2515 status = PFVF_STATUS_FAILURE;
2516 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2518 status = PFVF_STATUS_SUCCESS;
2519 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2520 p_queue->cids[qid_usage_idx].b_is_tx = true;
2525 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2529 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2530 struct ecore_vf_info *vf,
2533 bool cqe_completion)
2535 struct ecore_vf_queue *p_queue;
2536 enum _ecore_status_t rc = ECORE_SUCCESS;
2538 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2539 ECORE_IOV_VALIDATE_Q_NA)) {
2540 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2541 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2542 vf->relative_vf_id, rxq_id, qid_usage_idx);
2546 p_queue = &vf->vf_queues[rxq_id];
2548 /* We've validated the index and the existence of the active RXQ -
2549 * now we need to make sure that it's using the correct qid.
2551 if (!p_queue->cids[qid_usage_idx].p_cid ||
2552 p_queue->cids[qid_usage_idx].b_is_tx) {
2553 struct ecore_queue_cid *p_cid;
2555 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf, p_queue);
2556 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2557 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2558 vf->relative_vf_id, rxq_id, qid_usage_idx,
2559 rxq_id, p_cid->qid_usage_idx);
2563 /* Now that we know we have a valid Rx-queue - close it */
2564 rc = ecore_eth_rx_queue_stop(p_hwfn,
2565 p_queue->cids[qid_usage_idx].p_cid,
2566 false, cqe_completion);
2567 if (rc != ECORE_SUCCESS)
2570 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2571 vf->num_active_rxqs--;
2573 return ECORE_SUCCESS;
2576 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2577 struct ecore_vf_info *vf,
2581 struct ecore_vf_queue *p_queue;
2582 enum _ecore_status_t rc = ECORE_SUCCESS;
2584 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2585 ECORE_IOV_VALIDATE_Q_NA))
2588 p_queue = &vf->vf_queues[txq_id];
2589 if (!p_queue->cids[qid_usage_idx].p_cid ||
2590 !p_queue->cids[qid_usage_idx].b_is_tx)
2593 rc = ecore_eth_tx_queue_stop(p_hwfn,
2594 p_queue->cids[qid_usage_idx].p_cid);
2595 if (rc != ECORE_SUCCESS)
2598 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2599 return ECORE_SUCCESS;
2602 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2603 struct ecore_ptt *p_ptt,
2604 struct ecore_vf_info *vf)
2606 u16 length = sizeof(struct pfvf_def_resp_tlv);
2607 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2608 u8 status = PFVF_STATUS_FAILURE;
2609 struct vfpf_stop_rxqs_tlv *req;
2611 enum _ecore_status_t rc;
2613 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
2614 * would be one. Since no older ecore client passed multiple queues
2615 * using this API, sanitize the value.
2617 req = &mbx->req_virt->stop_rxqs;
2618 if (req->num_rxqs != 1) {
2619 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2620 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2621 vf->relative_vf_id);
2622 status = PFVF_STATUS_NOT_SUPPORTED;
2626 /* Find which qid-index is associated with the queue */
2627 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2628 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2631 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2632 qid_usage_idx, req->cqe_completion);
2633 if (rc == ECORE_SUCCESS)
2634 status = PFVF_STATUS_SUCCESS;
2636 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2640 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2641 struct ecore_ptt *p_ptt,
2642 struct ecore_vf_info *vf)
2644 u16 length = sizeof(struct pfvf_def_resp_tlv);
2645 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2646 u8 status = PFVF_STATUS_FAILURE;
2647 struct vfpf_stop_txqs_tlv *req;
2649 enum _ecore_status_t rc;
2651 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
2652 * would be one. Since no older ecore client passed multiple queues
2653 * using this API, sanitize the value.
2655 req = &mbx->req_virt->stop_txqs;
2656 if (req->num_txqs != 1) {
2657 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2658 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2659 vf->relative_vf_id);
2660 status = PFVF_STATUS_NOT_SUPPORTED;
2664 /* Find which qid-index is associated with the queue */
2665 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2666 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2669 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2671 if (rc == ECORE_SUCCESS)
2672 status = PFVF_STATUS_SUCCESS;
2675 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2679 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2680 struct ecore_ptt *p_ptt,
2681 struct ecore_vf_info *vf)
2683 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2684 u16 length = sizeof(struct pfvf_def_resp_tlv);
2685 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2686 struct vfpf_update_rxq_tlv *req;
2687 u8 status = PFVF_STATUS_FAILURE;
2688 u8 complete_event_flg;
2689 u8 complete_cqe_flg;
2691 enum _ecore_status_t rc;
2694 req = &mbx->req_virt->update_rxq;
2695 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2696 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2698 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2699 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2702 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2703 * expecting a single queue at a time. Validate this.
2705 if ((vf->acquire.vfdev_info.capabilities &
2706 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2707 req->num_rxqs != 1) {
2708 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2709 "VF[%d] supports QIDs but sends multiple queues\n",
2710 vf->relative_vf_id);
2714 /* Validate inputs - for the legacy case this is still true since
2715 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2717 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2718 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2719 ECORE_IOV_VALIDATE_Q_NA) ||
2720 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2721 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2722 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2723 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2724 vf->relative_vf_id, req->rx_qid,
2730 for (i = 0; i < req->num_rxqs; i++) {
2731 u16 qid = req->rx_qid + i;
2733 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2736 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2740 ECORE_SPQ_MODE_EBLOCK,
2742 if (rc != ECORE_SUCCESS)
2745 status = PFVF_STATUS_SUCCESS;
2747 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2751 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2752 void *p_tlvs_list, u16 req_type)
2754 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2758 if (!p_tlv->length) {
2759 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2763 if (p_tlv->type == req_type) {
2764 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2765 "Extended tlv type %s, length %d found\n",
2766 ecore_channel_tlvs_string[p_tlv->type],
2771 len += p_tlv->length;
2772 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2774 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2775 DP_NOTICE(p_hwfn, true,
2776 "TLVs has overrun the buffer size\n");
2779 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
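/* Layout assumed by the walk above - the request buffer is a chain of
 * variable-sized TLVs, each starting with a channel_tlv {type, length}
 * header and terminated by CHANNEL_TLV_LIST_END:
 *
 *   | first_tlv | extended tlv | ... | CHANNEL_TLV_LIST_END |
 *
 * 'length' covers the entire TLV including its header, so advancing by
 * p_tlv->length lands on the next header; a zero length [rejected
 * above] would otherwise loop forever, and the running 'len' guards
 * against chains overrunning TLV_BUFFER_SIZE.
 */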
2785 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2786 struct ecore_sp_vport_update_params *p_data,
2787 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2789 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2790 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2792 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2793 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2797 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2798 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2799 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2800 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2801 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2805 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2806 struct ecore_sp_vport_update_params *p_data,
2807 struct ecore_vf_info *p_vf,
2808 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2810 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2811 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2813 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2814 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2818 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2820 /* Ignore the VF request if we're forcing a vlan */
2821 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2822 p_data->update_inner_vlan_removal_flg = 1;
2823 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2826 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2830 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2831 struct ecore_sp_vport_update_params *p_data,
2832 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2834 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2835 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2837 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2838 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2839 if (!p_tx_switch_tlv)
2843 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2844 DP_NOTICE(p_hwfn, false,
2845 "FPGA: Ignore tx-switching configuration originating"
2851 p_data->update_tx_switching_flg = 1;
2852 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2853 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2857 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2858 struct ecore_sp_vport_update_params *p_data,
2859 struct ecore_iov_vf_mbx *p_mbx,
2862 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2863 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2865 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2866 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2870 p_data->update_approx_mcast_flg = 1;
2871 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2872 sizeof(unsigned long) *
2873 ETH_MULTICAST_MAC_BINS_IN_REGS);
2874 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2878 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2879 struct ecore_sp_vport_update_params *p_data,
2880 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2882 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2883 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2884 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2886 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2887 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2891 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2892 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2893 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2894 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2895 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2899 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2900 struct ecore_sp_vport_update_params *p_data,
2901 struct ecore_iov_vf_mbx *p_mbx,
2904 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2905 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2907 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2908 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2909 if (!p_accept_any_vlan)
2912 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2913 p_data->update_accept_any_vlan_flg =
2914 p_accept_any_vlan->update_accept_any_vlan_flg;
2915 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2919 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2920 struct ecore_vf_info *vf,
2921 struct ecore_sp_vport_update_params *p_data,
2922 struct ecore_rss_params *p_rss,
2923 struct ecore_iov_vf_mbx *p_mbx,
2924 u16 *tlvs_mask, u16 *tlvs_accepted)
2926 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2927 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2928 bool b_reject = false;
2932 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2933 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2935 p_data->rss_params = OSAL_NULL;
2939 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2941 p_rss->update_rss_config =
2942 !!(p_rss_tlv->update_rss_flags &
2943 VFPF_UPDATE_RSS_CONFIG_FLAG);
2944 p_rss->update_rss_capabilities =
2945 !!(p_rss_tlv->update_rss_flags &
2946 VFPF_UPDATE_RSS_CAPS_FLAG);
2947 p_rss->update_rss_ind_table =
2948 !!(p_rss_tlv->update_rss_flags &
2949 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2950 p_rss->update_rss_key =
2951 !!(p_rss_tlv->update_rss_flags &
2952 VFPF_UPDATE_RSS_KEY_FLAG);
2954 p_rss->rss_enable = p_rss_tlv->rss_enable;
2955 p_rss->rss_eng_id = vf->rss_eng_id;
2956 p_rss->rss_caps = p_rss_tlv->rss_caps;
2957 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2958 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2959 sizeof(p_rss->rss_key));
2961 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2962 (1 << p_rss_tlv->rss_table_size_log));
2964 for (i = 0; i < table_size; i++) {
2965 struct ecore_queue_cid *p_cid;
2967 q_idx = p_rss_tlv->rss_ind_table[i];
2968 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
2969 ECORE_IOV_VALIDATE_Q_ENABLE)) {
2970 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2971 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2972 vf->relative_vf_id, q_idx);
2977 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
2978 &vf->vf_queues[q_idx]);
2979 p_rss->rss_ind_table[i] = p_cid;
2982 p_data->rss_params = p_rss;
2984 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2986 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2990 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
2991 struct ecore_vf_info *vf,
2992 struct ecore_sp_vport_update_params *p_data,
2993 struct ecore_sge_tpa_params *p_sge_tpa,
2994 struct ecore_iov_vf_mbx *p_mbx,
2997 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2998 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3000 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3001 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3003 if (!p_sge_tpa_tlv) {
3004 p_data->sge_tpa_params = OSAL_NULL;
3008 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3010 p_sge_tpa->update_tpa_en_flg =
3011 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3012 p_sge_tpa->update_tpa_param_flg =
3013 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3014 VFPF_UPDATE_TPA_PARAM_FLAG);
3016 p_sge_tpa->tpa_ipv4_en_flg =
3017 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3018 p_sge_tpa->tpa_ipv6_en_flg =
3019 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3020 p_sge_tpa->tpa_pkt_split_flg =
3021 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3022 p_sge_tpa->tpa_hdr_data_split_flg =
3023 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3024 p_sge_tpa->tpa_gro_consistent_flg =
3025 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3027 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3028 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3029 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3030 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3031 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3033 p_data->sge_tpa_params = p_sge_tpa;
3035 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3038 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3039 struct ecore_ptt *p_ptt,
3040 struct ecore_vf_info *vf)
3042 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3043 struct ecore_sp_vport_update_params params;
3044 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3045 struct ecore_sge_tpa_params sge_tpa_params;
3046 u16 tlvs_mask = 0, tlvs_accepted = 0;
3047 u8 status = PFVF_STATUS_SUCCESS;
3049 enum _ecore_status_t rc;
3051 /* Validate PF can send such a request */
3052 if (!vf->vport_instance) {
3053 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3054 "No VPORT instance available for VF[%d],"
3055 " failing vport update\n",
3057 status = PFVF_STATUS_FAILURE;
3061 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3062 if (p_rss_params == OSAL_NULL) {
3063 status = PFVF_STATUS_FAILURE;
3067 OSAL_MEMSET(&params, 0, sizeof(params));
3068 params.opaque_fid = vf->opaque_fid;
3069 params.vport_id = vf->vport_id;
3070 params.rss_params = OSAL_NULL;
3072 /* Search for extended tlvs list and update values
3073 * from VF in struct ecore_sp_vport_update_params.
3075 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3076 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3077 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3078 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3079 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3080 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3081 ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3082 &sge_tpa_params, mbx, &tlvs_mask);
3084 tlvs_accepted = tlvs_mask;
3086 /* Some of the extended TLVs need to be validated first; In that case,
3087 * they can update the mask without updating the accepted [so that
3088 * PF could communicate to VF it has rejected request].
3090 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3091 mbx, &tlvs_mask, &tlvs_accepted);
3093 /* Just log a message if there isn't a single extended TLV in the buffer.
3094 * When all features of vport update ramrod would be requested by VF
3095 * as extended TLVs in buffer then an error can be returned in response
3096 * if there is no extended TLV present in buffer.
3098 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3099 &params, &tlvs_accepted) !=
3102 status = PFVF_STATUS_NOT_SUPPORTED;
3106 if (!tlvs_accepted) {
3108 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3109 "Upper-layer prevents said VF"
3110 " configuration\n");
3112 DP_NOTICE(p_hwfn, true,
3113 "No feature tlvs found for vport update\n");
3114 status = PFVF_STATUS_NOT_SUPPORTED;
3118 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3122 status = PFVF_STATUS_FAILURE;
3125 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3126 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3127 tlvs_mask, tlvs_accepted);
3128 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3131 static enum _ecore_status_t
3132 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3133 struct ecore_vf_info *p_vf,
3134 struct ecore_filter_ucast *p_params)
3138 /* First remove entries and then add new ones */
3139 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3140 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3141 if (p_vf->shadow_config.vlans[i].used &&
3142 p_vf->shadow_config.vlans[i].vid ==
3144 p_vf->shadow_config.vlans[i].used = false;
3147 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3148 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3149 "VF [%d] - Tries to remove a non-existing"
3151 p_vf->relative_vf_id);
3154 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3155 p_params->opcode == ECORE_FILTER_FLUSH) {
3156 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3157 p_vf->shadow_config.vlans[i].used = false;
3160 /* In forced mode, we're willing to remove entries - but we don't add
3163 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3164 return ECORE_SUCCESS;
3166 if (p_params->opcode == ECORE_FILTER_ADD ||
3167 p_params->opcode == ECORE_FILTER_REPLACE) {
3168 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3169 if (p_vf->shadow_config.vlans[i].used)
3172 p_vf->shadow_config.vlans[i].used = true;
3173 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3177 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3178 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3179 "VF [%d] - Tries to configure more than %d"
3181 p_vf->relative_vf_id,
3182 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3187 return ECORE_SUCCESS;
3190 static enum _ecore_status_t
3191 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3192 struct ecore_vf_info *p_vf,
3193 struct ecore_filter_ucast *p_params)
3195 char empty_mac[ETH_ALEN];
3198 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3200 /* If we're in forced-mode, we don't allow any change */
3201 /* TODO - this would change if we were ever to implement logic for
3202 * removing a forced MAC altogether [in which case, like for vlans,
3203 * we should be able to re-trace previous configuration.
3205 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3206 return ECORE_SUCCESS;
3208 /* First remove entries and then add new ones */
3209 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3210 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3211 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3212 p_params->mac, ETH_ALEN)) {
3213 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3219 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3220 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3221 "MAC isn't configured\n");
3224 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3225 p_params->opcode == ECORE_FILTER_FLUSH) {
3226 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3227 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3230 /* List the new MAC address */
3231 if (p_params->opcode != ECORE_FILTER_ADD &&
3232 p_params->opcode != ECORE_FILTER_REPLACE)
3233 return ECORE_SUCCESS;
3235 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3236 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3237 empty_mac, ETH_ALEN)) {
3238 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3239 p_params->mac, ETH_ALEN);
3240 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3241 "Added MAC at %d entry in shadow\n", i);
3246 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3247 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3248 "No available place for MAC\n");
3252 return ECORE_SUCCESS;
3255 static enum _ecore_status_t
3256 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3257 struct ecore_vf_info *p_vf,
3258 struct ecore_filter_ucast *p_params)
3260 enum _ecore_status_t rc = ECORE_SUCCESS;
3262 if (p_params->type == ECORE_FILTER_MAC) {
3263 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3264 if (rc != ECORE_SUCCESS)
3268 if (p_params->type == ECORE_FILTER_VLAN)
3269 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3274 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3275 struct ecore_ptt *p_ptt,
3276 struct ecore_vf_info *vf)
3278 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3279 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3280 struct vfpf_ucast_filter_tlv *req;
3281 u8 status = PFVF_STATUS_SUCCESS;
3282 struct ecore_filter_ucast params;
3283 enum _ecore_status_t rc;
3285 /* Prepare the unicast filter params */
3286 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3287 req = &mbx->req_virt->ucast_filter;
3288 params.opcode = (enum ecore_filter_opcode)req->opcode;
3289 params.type = (enum ecore_filter_ucast_type)req->type;
3291 /* @@@TBD - We might need logic on HV side in determining this */
3292 params.is_rx_filter = 1;
3293 params.is_tx_filter = 1;
3294 params.vport_to_remove_from = vf->vport_id;
3295 params.vport_to_add_to = vf->vport_id;
3296 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3297 params.vlan = req->vlan;
3299 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3300 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3301 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3302 vf->abs_vf_id, params.opcode, params.type,
3303 params.is_rx_filter ? "RX" : "",
3304 params.is_tx_filter ? "TX" : "",
3305 params.vport_to_add_to,
3306 params.mac[0], params.mac[1], params.mac[2],
3307 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3309 if (!vf->vport_instance) {
3310 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3311 "No VPORT instance available for VF[%d],"
3312 " failing ucast MAC configuration\n",
3314 status = PFVF_STATUS_FAILURE;
3318 /* Update shadow copy of the VF configuration */
3319 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3321 status = PFVF_STATUS_FAILURE;
3325 /* Determine if the unicast filtering is acceptable to the PF */
3326 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3327 (params.type == ECORE_FILTER_VLAN ||
3328 params.type == ECORE_FILTER_MAC_VLAN)) {
3329 /* Once VLAN is forced or PVID is set, do not allow
3330 * to add/replace any further VLANs.
3332 if (params.opcode == ECORE_FILTER_ADD ||
3333 params.opcode == ECORE_FILTER_REPLACE)
3334 status = PFVF_STATUS_FORCED;
3338 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3339 (params.type == ECORE_FILTER_MAC ||
3340 params.type == ECORE_FILTER_MAC_VLAN)) {
3341 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3342 (params.opcode != ECORE_FILTER_ADD &&
3343 params.opcode != ECORE_FILTER_REPLACE))
3344 status = PFVF_STATUS_FORCED;
3348 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3349 if (rc == ECORE_EXISTS) {
3351 } else if (rc == ECORE_INVAL) {
3352 status = PFVF_STATUS_FAILURE;
3356 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3357 ECORE_SPQ_MODE_CB, OSAL_NULL);
3359 status = PFVF_STATUS_FAILURE;
3362 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3363 sizeof(struct pfvf_def_resp_tlv), status);
3366 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3367 struct ecore_ptt *p_ptt,
3368 struct ecore_vf_info *vf)
3373 for (i = 0; i < vf->num_sbs; i++)
3374 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3376 vf->opaque_fid, false);
3378 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3379 sizeof(struct pfvf_def_resp_tlv),
3380 PFVF_STATUS_SUCCESS);
3383 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3384 struct ecore_ptt *p_ptt,
3385 struct ecore_vf_info *vf)
3387 u16 length = sizeof(struct pfvf_def_resp_tlv);
3388 u8 status = PFVF_STATUS_SUCCESS;
3390 /* Disable Interrupts for VF */
3391 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3393 /* Reset Permission table */
3394 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3396 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3400 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3401 struct ecore_ptt *p_ptt,
3402 struct ecore_vf_info *p_vf)
3404 u16 length = sizeof(struct pfvf_def_resp_tlv);
3405 u8 status = PFVF_STATUS_SUCCESS;
3406 enum _ecore_status_t rc = ECORE_SUCCESS;
3408 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3410 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3411 /* Stopping the VF */
3412 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3415 if (rc != ECORE_SUCCESS) {
3416 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3418 status = PFVF_STATUS_FAILURE;
3421 p_vf->state = VF_STOPPED;
3424 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3428 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3429 struct ecore_ptt *p_ptt,
3430 struct ecore_vf_info *vf)
3432 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3433 enum _ecore_status_t rc = ECORE_SUCCESS;
3434 struct vfpf_update_coalesce *req;
3435 u8 status = PFVF_STATUS_FAILURE;
3436 struct ecore_queue_cid *p_cid;
3437 u16 rx_coal, tx_coal;
3441 req = &mbx->req_virt->update_coalesce;
3443 rx_coal = req->rx_coal;
3444 tx_coal = req->tx_coal;
3447 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3448 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3450 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3451 vf->abs_vf_id, qid);
3455 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3456 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3458 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3459 vf->abs_vf_id, qid);
3463 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3464 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3465 vf->abs_vf_id, rx_coal, tx_coal, qid);
3468 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
3469 &vf->vf_queues[qid]);
3471 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3472 if (rc != ECORE_SUCCESS) {
3473 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3474 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3475 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3478 vf->rx_coal = rx_coal;
3481 /* TODO - in future, it might be possible to pass this in a per-cid
3482 * granularity. For now, do this for all Tx queues.
3485 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3487 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3488 if (p_queue->cids[i].p_cid == OSAL_NULL)
3491 if (!p_queue->cids[i].b_is_tx)
3494 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3495 p_queue->cids[i].p_cid);
3496 if (rc != ECORE_SUCCESS) {
3497 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3498 "VF[%d]: Unable to set tx queue coalesce\n",
3503 vf->tx_coal = tx_coal;
3506 status = PFVF_STATUS_SUCCESS;
3508 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3509 sizeof(struct pfvf_def_resp_tlv), status);
3512 enum _ecore_status_t
3513 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3514 u16 rx_coal, u16 tx_coal,
3517 struct ecore_queue_cid *p_cid;
3518 struct ecore_vf_info *vf;
3519 struct ecore_ptt *p_ptt;
3522 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3523 DP_NOTICE(p_hwfn, true,
3524 "VF[%d] - Can not set coalescing: VF is not active\n",
3529 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3530 p_ptt = ecore_ptt_acquire(p_hwfn);
3534 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3535 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3537 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3538 vf->abs_vf_id, qid);
3542 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3543 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3545 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3546 vf->abs_vf_id, qid);
3550 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3551 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3552 vf->abs_vf_id, rx_coal, tx_coal, qid);
3555 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
3556 &vf->vf_queues[qid]);
3558 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3559 if (rc != ECORE_SUCCESS) {
3560 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3561 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3562 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3565 vf->rx_coal = rx_coal;
3568 /* TODO - in future, it might be possible to pass this in a per-cid
3569 * granularity. For now, do this for all Tx queues.
3572 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3574 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3575 if (p_queue->cids[i].p_cid == OSAL_NULL)
3578 if (!p_queue->cids[i].b_is_tx)
3581 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3582 p_queue->cids[i].p_cid);
3583 if (rc != ECORE_SUCCESS) {
3584 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3585 "VF[%d]: Unable to set tx queue coalesce\n",
3590 vf->tx_coal = tx_coal;
3594 ecore_ptt_release(p_hwfn, p_ptt);
3599 static enum _ecore_status_t
3600 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3601 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3606 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3608 for (cnt = 0; cnt < 50; cnt++) {
3609 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3614 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3618 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3619 p_vf->abs_vf_id, val);
3620 return ECORE_TIMEOUT;
3623 return ECORE_SUCCESS;
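/* The poll above reads DORQ_REG_VF_USAGE_CNT while pretending to the
 * VF's concrete FID, so the counter reflects only that VF's
 * outstanding doorbells; the loop is assumed to break early [in the
 * elided lines] once the count drops to zero. The 50 iterations are
 * an empirical bound rather than a HW guarantee.
 */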
3626 static enum _ecore_status_t
3627 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3628 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3630 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3633 /* Read initial consumers & producers */
3634 for (i = 0; i < MAX_NUM_VOQS; i++) {
3637 cons[i] = ecore_rd(p_hwfn, p_ptt,
3638 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3640 prod = ecore_rd(p_hwfn, p_ptt,
3641 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3643 distance[i] = prod - cons[i];
3646 /* Wait for consumers to pass the producers */
3648 for (cnt = 0; cnt < 50; cnt++) {
3649 for (; i < MAX_NUM_VOQS; i++) {
3652 tmp = ecore_rd(p_hwfn, p_ptt,
3653 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3655 if (distance[i] > tmp - cons[i])
3659 if (i == MAX_NUM_VOQS)
3666 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3667 p_vf->abs_vf_id, i);
3668 return ECORE_TIMEOUT;
3671 return ECORE_SUCCESS;
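/* Why the 'distance' scheme above works: at entry,
 * distance[i] = prod - cons is the number of blocks VOQ i still has
 * in flight, and the poll waits until the consumer has advanced by at
 * least that much, i.e. until (tmp - cons[i]) >= distance[i].
 * Example with hypothetical values: cons = 10, prod = 14 gives
 * distance = 4, and polling completes once the consumer passes 14.
 * Since all counters are u32, the unsigned subtractions remain
 * correct across wrap-around.
 */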
3674 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3675 struct ecore_vf_info *p_vf,
3676 struct ecore_ptt *p_ptt)
3678 enum _ecore_status_t rc;
3680 /* TODO - add SRC and TM polling once we add storage IOV */
3682 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3686 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3690 return ECORE_SUCCESS;
3693 static enum _ecore_status_t
3694 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3695 struct ecore_ptt *p_ptt,
3696 u16 rel_vf_id, u32 *ack_vfs)
3698 struct ecore_vf_info *p_vf;
3699 enum _ecore_status_t rc = ECORE_SUCCESS;
3701 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3703 return ECORE_SUCCESS;
3705 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3706 (1ULL << (rel_vf_id % 64))) {
3707 u16 vfid = p_vf->abs_vf_id;
3709 /* TODO - should we lock channel? */
3711 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3712 "VF[%d] - Handling FLR\n", vfid);
3714 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3716 /* If VF isn't active, no need for anything but SW */
3720 /* TODO - what to do in case of failure? */
3721 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3722 if (rc != ECORE_SUCCESS)
3725 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3727 /* TODO - decide how to handle a failed final cleanup */
3728 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3732 /* Workaround to make VF-PF channel ready, as FW
3733 * doesn't do that as a part of FLR.
3736 GTT_BAR0_MAP_REG_USDM_RAM +
3737 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3739 /* VF_STOPPED has to be set only after final cleanup
3740 * but prior to re-enabling the VF.
3742 p_vf->state = VF_STOPPED;
3744 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3746 /* TODO - handle the failure to re-enable more gracefully */
3747 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3752 /* Mark VF for ack and clean pending state */
3753 if (p_vf->state == VF_RESET)
3754 p_vf->state = VF_STOPPED;
3755 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3756 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3757 ~(1ULL << (rel_vf_id % 64));
3758 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3759 ~(1ULL << (rel_vf_id % 64));
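/* Bitmap bookkeeping sketch: ack_vfs[] is an array of u32 bitmaps
 * indexed by the absolute VF id, while pending_flr[]/pending_events[]
 * are u64 bitmaps indexed by the relative id. E.g., for a
 * hypothetical vfid = 70 with rel_vf_id = 6, this sets bit 70 % 32 of
 * ack_vfs[70 / 32] and clears bit 6 of pending_flr[0].
 */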
3765 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3766 struct ecore_ptt *p_ptt)
3768 u32 ack_vfs[VF_MAX_STATIC / 32];
3769 enum _ecore_status_t rc = ECORE_SUCCESS;
3772 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3774 /* Since BRB <-> PRS interface can't be tested as part of the flr
3775 * polling due to HW limitations, simply sleep a bit. And since
3776 * there's no need to wait per-vf, do it before looping.
3780 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3781 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3783 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3787 enum _ecore_status_t
3788 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3789 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3791 u32 ack_vfs[VF_MAX_STATIC / 32];
3792 enum _ecore_status_t rc = ECORE_SUCCESS;
3794 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3796 /* Wait instead of polling the BRB <-> PRS interface */
3799 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3801 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3805 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3810 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3811 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3812 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3813 "[%08x,...,%08x]: %08x\n",
3814 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3816 if (!p_hwfn->p_dev->p_iov_info) {
3817 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3822 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3823 struct ecore_vf_info *p_vf;
3826 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3830 vfid = p_vf->abs_vf_id;
3831 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3832 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3833 u16 rel_vf_id = p_vf->relative_vf_id;
3835 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3836 "VF[%d] [rel %d] got FLR-ed\n",
3839 p_vf->state = VF_RESET;
3841 /* No need to lock here, since pending_flr should
3842 * only change here and before ACKing MFw. Since
3843 * MFW will not trigger an additional attention for
3844 * VF flr until ACKs, we're safe.
3846 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3854 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3856 struct ecore_mcp_link_params *p_params,
3857 struct ecore_mcp_link_state *p_link,
3858 struct ecore_mcp_link_capabilities *p_caps)
3860 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3861 struct ecore_bulletin_content *p_bulletin;
3866 p_bulletin = p_vf->bulletin.p_virt;
3869 __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3871 __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3873 __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3876 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3877 struct ecore_ptt *p_ptt, int vfid)
3879 struct ecore_iov_vf_mbx *mbx;
3880 struct ecore_vf_info *p_vf;
3882 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3886 mbx = &p_vf->vf_mbx;
3888 /* ecore_iov_process_mbx_request */
3891 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3893 mbx->first_tlv = mbx->req_virt->first_tlv;
3895 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
3896 p_vf->relative_vf_id,
3897 mbx->first_tlv.tl.type);
3899 /* Lock the per vf op mutex and note the locker's identity.
3900 * The unlock will take place in mbx response.
3902 ecore_iov_lock_vf_pf_channel(p_hwfn,
3903 p_vf, mbx->first_tlv.tl.type);
3905 /* check if tlv type is known */
3906 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3907 !p_vf->b_malicious) {
3908 /* switch on the opcode */
3909 switch (mbx->first_tlv.tl.type) {
3910 case CHANNEL_TLV_ACQUIRE:
3911 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3913 case CHANNEL_TLV_VPORT_START:
3914 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3916 case CHANNEL_TLV_VPORT_TEARDOWN:
3917 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3919 case CHANNEL_TLV_START_RXQ:
3920 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3922 case CHANNEL_TLV_START_TXQ:
3923 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3925 case CHANNEL_TLV_STOP_RXQS:
3926 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3928 case CHANNEL_TLV_STOP_TXQS:
3929 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3931 case CHANNEL_TLV_UPDATE_RXQ:
3932 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3934 case CHANNEL_TLV_VPORT_UPDATE:
3935 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3937 case CHANNEL_TLV_UCAST_FILTER:
3938 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3940 case CHANNEL_TLV_CLOSE:
3941 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3943 case CHANNEL_TLV_INT_CLEANUP:
3944 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3946 case CHANNEL_TLV_RELEASE:
3947 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3949 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3950 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3952 case CHANNEL_TLV_COALESCE_UPDATE:
3953 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3956 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3957 /* If we've received a message from a VF we consider malicious
3958 * we ignore the message unless it's one for RELEASE, in which
3959 * case we'll let it have the benefit of doubt, allowing the
3960 * next loaded driver to start again.
3962 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
3963 /* TODO - initiate FLR, remove malicious indication */
3964 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3965 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
3968 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3969 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3970 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3973 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3974 mbx->first_tlv.tl.type,
3975 sizeof(struct pfvf_def_resp_tlv),
3976 PFVF_STATUS_MALICIOUS);
3978 /* unknown TLV - this may belong to a VF driver from the future
3979 * - a version written after this PF driver was written, which
3980 * supports features unknown as of yet. Too bad since we don't
3981 * support them. Or this may be because someone wrote a crappy
3982 * VF driver and is sending garbage over the channel.
3984 DP_NOTICE(p_hwfn, false,
3985 "VF[%02x]: unknown TLV. type %04x length %04x"
3986 " padding %08x reply address %lu\n",
3988 mbx->first_tlv.tl.type,
3989 mbx->first_tlv.tl.length,
3990 mbx->first_tlv.padding,
3991 (unsigned long)mbx->first_tlv.reply_address);
3993 /* Try replying in case reply address matches the acquisition's
3996 if (p_vf->acquire.first_tlv.reply_address &&
3997 (mbx->first_tlv.reply_address ==
3998 p_vf->acquire.first_tlv.reply_address))
3999 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4000 mbx->first_tlv.tl.type,
4001 sizeof(struct pfvf_def_resp_tlv),
4002 PFVF_STATUS_NOT_SUPPORTED);
4004 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4005 "VF[%02x]: Can't respond to TLV -"
4006 " no valid reply address\n",
4010 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
4011 mbx->first_tlv.tl.type);
4013 #ifdef CONFIG_ECORE_SW_CHANNEL
4014 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
4015 mbx->sw_mbx.response_offset = 0;
4019 void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
4021 u64 add_bit = 1ULL << (vfid % 64);
4023 /* TODO - add locking mechanisms [no atomics in ecore, so we can't
4024 * add the lock inside the ecore_pf_iov struct].
4026 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
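/* E.g., a hypothetical vfid of 70 yields add_bit = 1ULL << 6, set in
 * pending_events[70 / 64], i.e. pending_events[1]. The same word/bit
 * split is used when the events are harvested below.
 */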
4029 void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
4032 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
4034 /* TODO - Take a lock */
4035 OSAL_MEMCPY(events, p_pending_events,
4036 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
4037 OSAL_MEMSET(p_pending_events, 0,
4038 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
4041 static struct ecore_vf_info *
4042 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
4044 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
4046 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4047 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4048 "Got indication for VF [abs 0x%08x] that cannot be"
4054 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
4057 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
4059 struct regpair *vf_msg)
4061 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
4065 return ECORE_SUCCESS;
4067 /* List the physical address of the request so that handler
4068 * could later on copy the message from it.
4070 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
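/* The EQE delivers the request's DMA address as a regpair of two
 * 32-bit halves; e.g., hi = 0x00000001 and lo = 0x02345000 reassemble
 * to pending_req = 0x0000000102345000. The message itself is copied
 * later via ecore_iov_copy_vf_msg().
 */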
4072 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
4075 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
4076 struct malicious_vf_eqe_data *p_data)
4078 struct ecore_vf_info *p_vf;
4080 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
4086 "VF [%d] - Malicious behavior [%02x]\n",
4087 p_vf->abs_vf_id, p_data->errId);
4089 p_vf->b_malicious = true;
4091 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
4094 enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
4097 union event_ring_data *data)
4100 case COMMON_EVENT_VF_PF_CHANNEL:
4101 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
4102 &data->vf_pf_channel.msg_addr);
4103 case COMMON_EVENT_VF_FLR:
4104 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4105 "VF-FLR is still not supported\n");
4106 return ECORE_SUCCESS;
4107 case COMMON_EVENT_MALICIOUS_VF:
4108 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4109 return ECORE_SUCCESS;
4111 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
4117 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4119 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4120 (1ULL << (rel_vf_id % 64)));
4123 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4125 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
4131 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4132 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
4136 return E4_MAX_NUM_VFS;
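/* Iteration sketch; the ecore_for_each_vf() convenience macro in the IOV API
 * header wraps this same pattern (handle_vf() is a hypothetical per-VF
 * handler):
 *
 *	u16 i;
 *
 *	for (i = ecore_iov_get_next_active_vf(p_hwfn, 0);
 *	     i < E4_MAX_NUM_VFS;
 *	     i = ecore_iov_get_next_active_vf(p_hwfn, i + 1))
 *		handle_vf(p_hwfn, i);
 */
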
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	/* DMAE length is given in dwords, hence the division by 4 */
	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}

void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

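/* A forced MAC overrides whatever the VF requested and is enforced by the
 * PF; by contrast, ecore_iov_bulletin_set_mac() below only suggests an
 * address through the bulletin board, and is refused while a forced MAC is
 * in effect.
 */
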
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
	    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
	    : 0;

	return ECORE_SUCCESS;
}

void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}

void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced vlan, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced vlan to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	/* A pvid of 0 clears a previously forced vlan */
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port,
				      u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

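/* The UDP ports are published through the bulletin board so that VFs can
 * keep their VXLAN/GENEVE tunnel classification in sync with the PF.
 */
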
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
	    : ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}

void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}

#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif

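/* Channel messages are only sanity-checked structurally: a request must be
 * at least one first-TLV long and can never exceed the TLV union, which is
 * all the PF-side mirror buffer can hold.
 */
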
bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}

u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}

enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}

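/* Regarding the '+ 0x10' above - the VF statistics ids appear to start right
 * after the 16 PF ids; this offset is an assumption inferred from the bare
 * constant used here rather than from a named define.
 */
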
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}

bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}

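/* Taken together, the predicates above expose the VF lifecycle:
 * VF_FREE [awaiting ACQUIRE] -> VF_ACQUIRED [acquired, vport not yet
 * started] -> VF_ENABLED [fully initialized], with VF_STOPPED marking a VF
 * that has been torn down.
 */
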
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}