 * Copyright (c) 2016 - 2018 Cavium Inc.
 * See LICENSE.qede_pmd for copyright and licensing details.

#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"

static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  union event_ring_data *data,

const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_COALESCE_UPDATE",
	"CHANNEL_TLV_COALESCE_READ",
	"CHANNEL_TLV_BULLETIN_UPDATE_MAC",
	"CHANNEL_TLV_UPDATE_MTU",
static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= ECORE_QCID_LEGACY_VF_CID;

static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
		p_ramrod->personality = PERSONALITY_ETH;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",

static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid &&
		    !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;

enum ecore_iov_validate_q_mode {
	ECORE_IOV_VALIDATE_Q_NA,
	ECORE_IOV_VALIDATE_Q_ENABLE,
	ECORE_IOV_VALIDATE_Q_DISABLE,

static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
					  enum ecore_iov_validate_q_mode mode,
	if (mode == ECORE_IOV_VALIDATE_Q_NA)

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct ecore_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (p_qcid->p_cid == OSAL_NULL)

		if (p_qcid->b_is_tx != b_is_tx)

		/* Found. It's enabled. */
		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);

	/* If no valid cid was found, the queue is disabled */
	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);

static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   enum ecore_iov_validate_q_mode mode)
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);

	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   enum ecore_iov_validate_q_mode mode)
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);

	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);

static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

/* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,

static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,

enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
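	/* The CRC covers the bulletin content, excluding the crc field itself */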
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,

static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);

		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs is already set to a non-zero value."
			   " Ignoring PCI configuration value\n");

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d;"
			  " setting num_vf to zero\n",

	return ECORE_SUCCESS;

static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
			  "ecore_iov_setup_vfdb called without allocating mem first\n");

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;

static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;

static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);

enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");

	p_hwfn->pf_iov_info = p_sriov;

	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				    ecore_sriov_eqe_event);

	return ecore_iov_allocate_vfdb(p_hwfn);

void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))

	ecore_iov_setup_vfdb(p_hwfn);

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
	OSAL_FREE(p_dev, p_dev->p_iov_info);

enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when it's available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, false,
			  "Can't support IOV due to lack of memory\n");

	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		return ECORE_SUCCESS;

	/* First VF index based on offset is tricky:
	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
	 *    provide the number for eng0. 2nd engine VFs would begin
	 *    after the first engine's VFs.
	 *  - If !ARI, VFs would start on next device.
	 *    so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is reached only in the latter
	 * case, to differentiate between the two.
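	 *
	 * For example (illustrative numbers): with ARI and abs_pf_id 2, an
	 * offset of 14 yields first_vf_in_pf = 14 + 2 - 16 = 0.
	 */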
	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;

static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
	struct ecore_vf_info *vf;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);

		vf->to_disable = to_disable;

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
	if (!IS_ECORE_SRIOV(p_dev))

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);

/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
		rc = ECORE_UNKNOWN_ERROR;

static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));

static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
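	/* unpretend - revert to the PF's own concrete FID */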
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->opaque_fid, true);

static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

static enum _ecore_status_t
ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
	/* If client overrides this, don't do anything */
	if (p_hwfn->p_dev->b_dont_override_vf_msix)
		return ECORE_SUCCESS;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_for_each_vf(p_hwfn, i) {
			struct ecore_vf_info *p_vf;

			p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);

			current_max = OSAL_MAX_T(u8, current_max,

	if (num_sbs > current_max)
		return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,

	return ECORE_SUCCESS;

static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					     vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

 * @brief ecore_iov_config_perm_table - configure the permission zone table
 *
 * In E4, the queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
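		/* Entry format: bit 8 is the valid bit, bits 7:0 the abs VF id */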
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);

static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);

static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
	struct ecore_igu_block *p_block;
	struct cau_sb_entry sb_entry;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
			(u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
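
	/* Only the vector number changes per queue below; the function
	 * number and valid bits are common to all of this VF's SBs.
	 */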
	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = ecore_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~ECORE_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		ecore_wr(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY +
			 sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    p_block->igu_sb_id * sizeof(u64), 2, 0);

	vf->num_sbs = (u8)num_rx_queues;

 * @brief The function invalidates all the VF entries;
 * technically this isn't required, but it is added for
 * cleanliness and ease of debugging in case a VF attempts to
 * produce an interrupt after it has been taken down.
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;

void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;

enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf: vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;

		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);

	/* Perform sanity checking on the requested vport/rss */
	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
			  p_params->rel_vf_id, p_params->vport_id);

	if ((p_params->num_queues > 1) &&
	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
			  p_params->rel_vf_id, p_params->rss_eng_id);

	/* TODO - remove this once we get confidence of change */
	if (!p_params->vport_id) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
			  p_params->rel_vf_id);

	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
			  p_params->rel_vf_id);

	vf->vport_id = p_params->vport_id;
	vf->rss_eng_id = p_params->rss_eng_id;

	/* Since it's possible to relocate SBs, it's a bit difficult to check
	 * things here. Simply check whether the index falls in the range
	 * belonging to the PF.
	 */
	for (i = 0; i < p_params->num_queues; i++) {
		qid = p_params->req_rx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));

		qid = p_params->req_tx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);

	/* Update the link configuration in bulletin. */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
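		/* Mark VF as active; active_vfs packs 64 VFs per u64 word */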
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
			(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;

enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf: vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf-close.
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
			~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;

	return ECORE_SUCCESS;

static bool ecore_iov_tlv_supported(u16 tlvtype)
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	if (ecore_iov_tlv_supported(tlv))
			   "VF[%d]: vf pf channel locked by %s\n",
			   ecore_channel_tlvs_string[tlv]);
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   ecore_channel_tlvs_string[expected_tlv]);
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the locking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
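
/* A typical reply is built by chaining TLVs onto the reply buffer and then
 * closing the list (see ecore_iov_prepare_resp below):
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	ecore_add_tlv(&mbx->offset, type, length);
 *	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */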
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)

		/* Validate entry - protect against malicious VFs */
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");

		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");

static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
#ifdef CONFIG_ECORE_SW_CHANNEL
				    u16 OSAL_UNUSED length,
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
		length + sizeof(struct channel_list_end_tlv);

	if (!p_vf->b_hw_channel)

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;
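
	/* Copy the reply body first, skipping the leading u64 (the header
	 * that carries the status); that header is DMAed last, below, so
	 * the VF never observes a partially-written response.
	 */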
	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,

	/* Once the PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
		 GTT_BAR0_MAP_REG_USDM_RAM +
		 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);

static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))

		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(i),

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(&mbx->offset, type, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      bool b_enabled_only)
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);

	return &vf->p_vf_info;

static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested fewer resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)

			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = OSAL_NULL;

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);

/* Returns either 0, or log(size) */
static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
	u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *p_vf,
				   struct vf_pf_resc_request *p_req,
				   struct pf_vf_resc *p_resp)
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
		     DB_ADDR_VF(0, DQ_DEMS_LEGACY);

	p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
			bar_size = 1 << bar_size;

		if (ECORE_IS_CMT(p_hwfn->p_dev))
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
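
	/* Each CID consumes one doorbell; if the bar can't hold 256
	 * doorbells, cap the number of CIDs to what actually fits.
	 */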
	if (bar_size / db_size < 256)
		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
					      (u8)(bar_size / db_size));

static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;

	return PFVF_STATUS_SUCCESS;
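
/* Fill the addresses/sizes of the per-storm queue-statistics windows
 * (within the VF's BAR0 zones) from which the VF reads its statistics;
 * Tstorm statistics are not exposed to VFs.
 */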
static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;

static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
				  "VF[%d] needs fastpath HSI %02x.%02x, which is"
				  " incompatible with loaded FW's fastpath"
				  req->vfdev_info.eth_fp_hsi_major,
				  req->vfdev_info.eth_fp_hsi_minor,
				  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

	/* On 100g PFs, prevent old VFs from loading */
	if (ECORE_IS_CMT(p_hwfn->p_dev) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
			"VF[%d] is running an old driver that doesn't support"

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
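	/* Agree on the smaller of the PF's and the VF's bulletin sizes */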
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,

	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
		vfpf_status = PFVF_STATUS_FAILURE;

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  filter.vlan, p_vf->relative_vf_id);

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
static enum _ecore_status_t
ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf,
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)

	if ((events & (1 << MAC_ADDR_FORCED)) ||
	    p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");

		if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
			p_vf->configured_features |=
				1 << VFPF_BULLETIN_MAC_ADDR;
			p_vf->configured_features |= 1 << MAC_ADDR_FORCED;

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
			  1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct ecore_queue_cid *p_cid = OSAL_NULL;

			/* There can be at most one Rx queue per qzone. Find it */
			p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
			if (p_cid == OSAL_NULL)

			rc = ecore_sp_eth_rx_queues_update(p_hwfn,
							   ECORE_SPQ_MODE_EBLOCK,
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"
					  " for queue[0x%04x]\n",
					  p_cid->rel.queue_id);

		p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2086 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2087 struct ecore_ptt *p_ptt,
2088 struct ecore_vf_info *vf)
2090 struct ecore_sp_vport_start_params params = { 0 };
2091 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2092 struct vfpf_vport_start_tlv *start;
2093 u8 status = PFVF_STATUS_SUCCESS;
2094 struct ecore_vf_info *vf_info;
2097 enum _ecore_status_t rc;
2099 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2101 DP_NOTICE(p_hwfn->p_dev, true,
2102 "Failed to get VF info, invalid vfid [%d]\n",
2103 vf->relative_vf_id);
2107 vf->state = VF_ENABLED;
2108 start = &mbx->req_virt->start_vport;
2110 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2112 /* Initialize Status block in CAU */
2113 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2114 if (!start->sb_addr[sb_id]) {
2115 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2116 "VF[%d] did not fill the address of SB %d\n",
2117 vf->relative_vf_id, sb_id);
2121 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2122 start->sb_addr[sb_id],
2127 vf->mtu = start->mtu;
2128 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2130 /* Take into consideration configuration forced by hypervisor;
2131 * if none is configured, use the supplied VF values [for old
2132 * VFs that would still be fine, since they passed '0' as padding].
2134 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2135 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2136 u8 vf_req = start->only_untagged;
2138 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2139 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2142 params.tpa_mode = start->tpa_mode;
2143 params.remove_inner_vlan = start->inner_vlan_removal;
2144 params.tx_switching = true;
2147 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2148 DP_NOTICE(p_hwfn, false,
2149 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2150 params.tx_switching = false;
2154 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2155 params.drop_ttl0 = false;
2156 params.concrete_fid = vf->concrete_fid;
2157 params.opaque_fid = vf->opaque_fid;
2158 params.vport_id = vf->vport_id;
2159 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2160 params.mtu = vf->mtu;
2161 params.check_mac = true;
2163 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2164 if (rc != ECORE_SUCCESS) {
2166 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2167 status = PFVF_STATUS_FAILURE;
2169 vf->vport_instance++;
2171 /* Force configuration if needed on the newly opened vport */
2172 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2173 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2174 vf->vport_id, vf->opaque_fid);
2175 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2178 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2179 sizeof(struct pfvf_def_resp_tlv), status);
2182 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2183 struct ecore_ptt *p_ptt,
2184 struct ecore_vf_info *vf)
2186 u8 status = PFVF_STATUS_SUCCESS;
2187 enum _ecore_status_t rc;
2189 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2190 vf->vport_instance--;
2191 vf->spoof_chk = false;
2193 if ((ecore_iov_validate_active_rxq(vf)) ||
2194 (ecore_iov_validate_active_txq(vf))) {
2195 vf->b_malicious = true;
2196 DP_NOTICE(p_hwfn, false,
2197 "VF [%02x] - considered malicious;"
2198 " Unable to stop RX/TX queuess\n",
2200 status = PFVF_STATUS_MALICIOUS;
2204 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2205 if (rc != ECORE_SUCCESS) {
2207 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2208 status = PFVF_STATUS_FAILURE;
2211 /* Forget the configuration on the vport */
2212 vf->configured_features = 0;
2213 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2216 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2217 sizeof(struct pfvf_def_resp_tlv), status);
2220 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2221 struct ecore_ptt *p_ptt,
2222 struct ecore_vf_info *vf,
2223 u8 status, bool b_legacy)
2225 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2226 struct pfvf_start_queue_resp_tlv *p_tlv;
2227 struct vfpf_start_rxq_tlv *req;
2230 mbx->offset = (u8 *)mbx->reply_virt;
2232 /* Taking a bigger struct instead of adding a TLV to list was a
2233 * mistake, but one which we're now stuck with, as some older
2234 * clients assume the size of the previous response.
2237 length = sizeof(*p_tlv);
2239 length = sizeof(struct pfvf_def_resp_tlv);
2241 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2242 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2243 sizeof(struct channel_list_end_tlv));
2245 /* Update the TLV with the response */
2246 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2247 req = &mbx->req_virt->start_rxq;
2248 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2249 OFFSETOF(struct mstorm_vf_zone,
2250 non_trigger.eth_rx_queue_producers) +
2251 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2254 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
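/* Illustrative sketch (not part of the driver): the producer offset
 * reported above is a linear address computation - the MSDM zone-B
 * window base, plus the offset of the Rx-producer array inside the
 * mstorm VF zone, plus one producer entry per preceding queue. The
 * parameter names below are hypothetical stand-ins for the real
 * constants.
 */
#if 0
static u32 sketch_rx_prod_offset(u32 zone_b_base, u32 prods_array_offset,
				 u32 prod_entry_size, u16 rx_qid)
{
	/* Mirrors the p_tlv->offset assignment above */
	return zone_b_base + prods_array_offset + prod_entry_size * rx_qid;
}
#endif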
2257 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2258 struct ecore_vf_info *p_vf, bool b_is_tx)
2260 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2261 struct vfpf_qid_tlv *p_qid_tlv;
2263 /* Search for the qid TLV if the VF published that it's going to provide it */
2264 if (!(p_vf->acquire.vfdev_info.capabilities &
2265 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2267 return ECORE_IOV_LEGACY_QID_TX;
2269 return ECORE_IOV_LEGACY_QID_RX;
2272 p_qid_tlv = (struct vfpf_qid_tlv *)
2273 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2275 if (p_qid_tlv == OSAL_NULL) {
2276 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2277 "VF[%2x]: Failed to provide qid\n",
2278 p_vf->relative_vf_id);
2280 return ECORE_IOV_QID_INVALID;
2283 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2284 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2285 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2286 p_vf->relative_vf_id, p_qid_tlv->qid);
2287 return ECORE_IOV_QID_INVALID;
2290 return p_qid_tlv->qid;
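/* Illustrative sketch (not part of the driver): a VF that advertised
 * VFPF_ACQUIRE_CAP_QUEUE_QIDS must append a qid TLV to its request, and
 * the helper above bounds-checks the published qid against
 * MAX_QUEUES_PER_QZONE; legacy VFs get fixed per-direction indices. A
 * minimal model of that contract, using hypothetical constants:
 */
#if 0
#define SKETCH_MAX_QUEUES_PER_QZONE 4
#define SKETCH_QID_INVALID 0xff

static u8 sketch_resolve_qid(bool vf_supports_qids, bool qid_tlv_found,
			     u8 published_qid, bool b_is_tx)
{
	if (!vf_supports_qids)
		return b_is_tx ? 1 : 0; /* legacy Tx/Rx indices */
	if (!qid_tlv_found || published_qid >= SKETCH_MAX_QUEUES_PER_QZONE)
		return SKETCH_QID_INVALID;
	return published_qid;
}
#endif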
2293 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2294 struct ecore_ptt *p_ptt,
2295 struct ecore_vf_info *vf)
2297 struct ecore_queue_start_common_params params;
2298 struct ecore_queue_cid_vf_params vf_params;
2299 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2300 u8 status = PFVF_STATUS_NO_RESOURCE;
2301 u8 qid_usage_idx, vf_legacy = 0;
2302 struct ecore_vf_queue *p_queue;
2303 struct vfpf_start_rxq_tlv *req;
2304 struct ecore_queue_cid *p_cid;
2305 struct ecore_sb_info sb_dummy;
2306 enum _ecore_status_t rc;
2308 req = &mbx->req_virt->start_rxq;
2310 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2311 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2312 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2315 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2316 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2319 p_queue = &vf->vf_queues[req->rx_qid];
2320 if (p_queue->cids[qid_usage_idx].p_cid)
2323 vf_legacy = ecore_vf_calculate_legacy(vf);
2325 /* Acquire a new queue-cid */
2326 OSAL_MEMSET(&params, 0, sizeof(params));
2327 params.queue_id = (u8)p_queue->fw_rx_qid;
2328 params.vport_id = vf->vport_id;
2329 params.stats_id = vf->abs_vf_id + 0x10;
2331 /* Since IGU index is passed via sb_info, construct a dummy one */
2332 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2333 sb_dummy.igu_sb_id = req->hw_sb;
2334 params.p_sb = &sb_dummy;
2335 params.sb_idx = req->sb_index;
2337 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2338 vf_params.vfid = vf->relative_vf_id;
2339 vf_params.vf_qid = (u8)req->rx_qid;
2340 vf_params.vf_legacy = vf_legacy;
2341 vf_params.qid_usage_idx = qid_usage_idx;
2343 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2344 &params, true, &vf_params);
2345 if (p_cid == OSAL_NULL)
2348 /* Legacy VFs have their Producers in a different location, which they
2349 * calculate on their own and clean the producer prior to this.
2351 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2353 GTT_BAR0_MAP_REG_MSDM_RAM +
2354 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2357 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2362 if (rc != ECORE_SUCCESS) {
2363 status = PFVF_STATUS_FAILURE;
2364 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2366 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2367 p_queue->cids[qid_usage_idx].b_is_tx = false;
2368 status = PFVF_STATUS_SUCCESS;
2369 vf->num_active_rxqs++;
2373 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2375 ECORE_QCID_LEGACY_VF_RX_PROD));
2379 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2380 struct ecore_tunnel_info *p_tun,
2381 u16 tunn_feature_mask)
2383 p_resp->tunn_feature_mask = tunn_feature_mask;
2384 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2385 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2386 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2387 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2388 p_resp->ipgre_mode = p_tun->ip_geneve.b_mode_enabled;
2389 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2390 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2391 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2392 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2393 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2394 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2395 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2399 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2400 struct ecore_tunn_update_type *p_tun,
2401 enum ecore_tunn_mode mask, u8 tun_cls)
2403 if (p_req->tun_mode_update_mask & (1 << mask)) {
2404 p_tun->b_update_mode = true;
2406 if (p_req->tunn_mode & (1 << mask))
2407 p_tun->b_mode_enabled = true;
2410 p_tun->tun_cls = tun_cls;
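/* Illustrative sketch (not part of the driver): each tunnel mode is
 * steered by two bit-fields in the VF request - one marking "update
 * this mode" and one carrying the new enabled/disabled state, which
 * only matters when the update bit is set. A compact model with
 * hypothetical names:
 */
#if 0
static void sketch_decode_tunn_mode(u16 update_mask, u16 mode_bits, int mode,
				    bool *p_update, bool *p_enable)
{
	*p_update = !!(update_mask & (1 << mode));
	/* Honour the requested state only when an update was asked for */
	*p_enable = *p_update && !!(mode_bits & (1 << mode));
}
#endif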
2414 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2415 struct ecore_tunn_update_type *p_tun,
2416 struct ecore_tunn_update_udp_port *p_port,
2417 enum ecore_tunn_mode mask,
2418 u8 tun_cls, u8 update_port, u16 port)
2421 p_port->b_update_port = true;
2422 p_port->port = port;
2425 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2429 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2431 bool b_update_requested = false;
2433 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2434 p_req->update_geneve_port || p_req->update_vxlan_port)
2435 b_update_requested = true;
2437 return b_update_requested;
2440 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2441 struct ecore_ptt *p_ptt,
2442 struct ecore_vf_info *p_vf)
2444 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2445 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2446 struct pfvf_update_tunn_param_tlv *p_resp;
2447 struct vfpf_update_tunn_param_tlv *p_req;
2448 enum _ecore_status_t rc = ECORE_SUCCESS;
2449 u8 status = PFVF_STATUS_SUCCESS;
2450 bool b_update_required = false;
2451 struct ecore_tunnel_info tunn;
2452 u16 tunn_feature_mask = 0;
2455 mbx->offset = (u8 *)mbx->reply_virt;
2457 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2458 p_req = &mbx->req_virt->tunn_param_update;
2460 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2461 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2462 "No tunnel update requested by VF\n");
2463 status = PFVF_STATUS_FAILURE;
2467 tunn.b_update_rx_cls = p_req->update_tun_cls;
2468 tunn.b_update_tx_cls = p_req->update_tun_cls;
2470 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2471 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2472 p_req->update_vxlan_port,
2474 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2475 ECORE_MODE_L2GENEVE_TUNN,
2476 p_req->l2geneve_clss,
2477 p_req->update_geneve_port,
2478 p_req->geneve_port);
2479 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2480 ECORE_MODE_IPGENEVE_TUNN,
2481 p_req->ipgeneve_clss);
2482 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2483 ECORE_MODE_L2GRE_TUNN,
2485 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2486 ECORE_MODE_IPGRE_TUNN,
2489 /* If the PF modifies the VF's request, it should still
2490 * return an error in case of a partial or modified
2491 * configuration, as opposed to the one requested.
2493 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2494 &b_update_required, &tunn);
2496 if (rc != ECORE_SUCCESS)
2497 status = PFVF_STATUS_FAILURE;
2499 /* Check whether the ECORE client is willing to update anything */
2500 if (b_update_required) {
2503 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2504 ECORE_SPQ_MODE_EBLOCK,
2506 if (rc != ECORE_SUCCESS)
2507 status = PFVF_STATUS_FAILURE;
2509 geneve_port = p_tun->geneve_port.port;
2510 ecore_for_each_vf(p_hwfn, i) {
2511 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2512 p_tun->vxlan_port.port,
2518 p_resp = ecore_add_tlv(&mbx->offset,
2519 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2521 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2522 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2523 sizeof(struct channel_list_end_tlv));
2525 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2528 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2529 struct ecore_ptt *p_ptt,
2530 struct ecore_vf_info *p_vf,
2534 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2535 struct pfvf_start_queue_resp_tlv *p_tlv;
2536 bool b_legacy = false;
2539 mbx->offset = (u8 *)mbx->reply_virt;
2541 /* Taking a bigger struct instead of adding a TLV to list was a
2542 * mistake, but one which we're now stuck with, as some older
2543 * clients assume the size of the previous response.
2545 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2546 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2550 length = sizeof(*p_tlv);
2552 length = sizeof(struct pfvf_def_resp_tlv);
2554 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2555 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2556 sizeof(struct channel_list_end_tlv));
2558 /* Update the TLV with the response */
2559 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2560 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2562 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
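/* Illustrative sketch (not part of the driver): as the comment above
 * explains, pre-TLV clients assume the older, smaller response layout,
 * so the reply length is chosen by the client's HSI vintage rather
 * than negotiated. A minimal model with hypothetical sizes:
 */
#if 0
static u16 sketch_resp_length(bool b_legacy_client, u16 full_resp_size,
			      u16 legacy_resp_size)
{
	/* Legacy clients read only the shorter, historical response */
	return b_legacy_client ? legacy_resp_size : full_resp_size;
}
#endif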
2565 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2566 struct ecore_ptt *p_ptt,
2567 struct ecore_vf_info *vf)
2569 struct ecore_queue_start_common_params params;
2570 struct ecore_queue_cid_vf_params vf_params;
2571 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2572 u8 status = PFVF_STATUS_NO_RESOURCE;
2573 struct ecore_vf_queue *p_queue;
2574 struct vfpf_start_txq_tlv *req;
2575 struct ecore_queue_cid *p_cid;
2576 struct ecore_sb_info sb_dummy;
2577 u8 qid_usage_idx, vf_legacy;
2579 enum _ecore_status_t rc;
2582 OSAL_MEMSET(&params, 0, sizeof(params));
2583 req = &mbx->req_virt->start_txq;
2585 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2586 ECORE_IOV_VALIDATE_Q_NA) ||
2587 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2590 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2591 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2594 p_queue = &vf->vf_queues[req->tx_qid];
2595 if (p_queue->cids[qid_usage_idx].p_cid)
2598 vf_legacy = ecore_vf_calculate_legacy(vf);
2600 /* Acquire a new queue-cid */
2601 params.queue_id = p_queue->fw_tx_qid;
2602 params.vport_id = vf->vport_id;
2603 params.stats_id = vf->abs_vf_id + 0x10;
2605 /* Since IGU index is passed via sb_info, construct a dummy one */
2606 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2607 sb_dummy.igu_sb_id = req->hw_sb;
2608 params.p_sb = &sb_dummy;
2609 params.sb_idx = req->sb_index;
2611 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2612 vf_params.vfid = vf->relative_vf_id;
2613 vf_params.vf_qid = (u8)req->tx_qid;
2614 vf_params.vf_legacy = vf_legacy;
2615 vf_params.qid_usage_idx = qid_usage_idx;
2617 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2618 &params, false, &vf_params);
2619 if (p_cid == OSAL_NULL)
2622 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2623 vf->relative_vf_id);
2624 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2625 req->pbl_addr, req->pbl_size, pq);
2626 if (rc != ECORE_SUCCESS) {
2627 status = PFVF_STATUS_FAILURE;
2628 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2630 status = PFVF_STATUS_SUCCESS;
2631 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2632 p_queue->cids[qid_usage_idx].b_is_tx = true;
2637 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2641 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2642 struct ecore_vf_info *vf,
2645 bool cqe_completion)
2647 struct ecore_vf_queue *p_queue;
2648 enum _ecore_status_t rc = ECORE_SUCCESS;
2650 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2651 ECORE_IOV_VALIDATE_Q_NA)) {
2652 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2653 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2654 vf->relative_vf_id, rxq_id, qid_usage_idx);
2658 p_queue = &vf->vf_queues[rxq_id];
2660 /* We've validated the index and the existence of the active RXQ -
2661 * now we need to make sure that it's using the correct qid.
2663 if (!p_queue->cids[qid_usage_idx].p_cid ||
2664 p_queue->cids[qid_usage_idx].b_is_tx) {
2665 struct ecore_queue_cid *p_cid;
2667 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2668 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2669 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2670 vf->relative_vf_id, rxq_id, qid_usage_idx,
2671 rxq_id, p_cid->qid_usage_idx);
2675 /* Now that we know we have a valid Rx-queue - close it */
2676 rc = ecore_eth_rx_queue_stop(p_hwfn,
2677 p_queue->cids[qid_usage_idx].p_cid,
2678 false, cqe_completion);
2679 if (rc != ECORE_SUCCESS)
2682 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2683 vf->num_active_rxqs--;
2685 return ECORE_SUCCESS;
2688 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2689 struct ecore_vf_info *vf,
2693 struct ecore_vf_queue *p_queue;
2694 enum _ecore_status_t rc = ECORE_SUCCESS;
2696 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2697 ECORE_IOV_VALIDATE_Q_NA))
2700 p_queue = &vf->vf_queues[txq_id];
2701 if (!p_queue->cids[qid_usage_idx].p_cid ||
2702 !p_queue->cids[qid_usage_idx].b_is_tx)
2705 rc = ecore_eth_tx_queue_stop(p_hwfn,
2706 p_queue->cids[qid_usage_idx].p_cid);
2707 if (rc != ECORE_SUCCESS)
2710 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2711 return ECORE_SUCCESS;
2714 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2715 struct ecore_ptt *p_ptt,
2716 struct ecore_vf_info *vf)
2718 u16 length = sizeof(struct pfvf_def_resp_tlv);
2719 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2720 u8 status = PFVF_STATUS_FAILURE;
2721 struct vfpf_stop_rxqs_tlv *req;
2723 enum _ecore_status_t rc;
2725 /* Starting with CHANNEL_TLV_QID, it's assumed that 'num_rxqs'
2726 * would be one. Since no older ecore passed multiple queues
2727 * using this API, sanitize the value.
2729 req = &mbx->req_virt->stop_rxqs;
2730 if (req->num_rxqs != 1) {
2731 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2732 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2733 vf->relative_vf_id);
2734 status = PFVF_STATUS_NOT_SUPPORTED;
2738 /* Find which qid-index is associated with the queue */
2739 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2740 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2743 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2744 qid_usage_idx, req->cqe_completion);
2745 if (rc == ECORE_SUCCESS)
2746 status = PFVF_STATUS_SUCCESS;
2748 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2752 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2753 struct ecore_ptt *p_ptt,
2754 struct ecore_vf_info *vf)
2756 u16 length = sizeof(struct pfvf_def_resp_tlv);
2757 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2758 u8 status = PFVF_STATUS_FAILURE;
2759 struct vfpf_stop_txqs_tlv *req;
2761 enum _ecore_status_t rc;
2763 /* Starting with CHANNEL_TLV_QID, it's assumed that 'num_txqs'
2764 * would be one. Since no older ecore passed multiple queues
2765 * using this API, sanitize the value.
2767 req = &mbx->req_virt->stop_txqs;
2768 if (req->num_txqs != 1) {
2769 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2770 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2771 vf->relative_vf_id);
2772 status = PFVF_STATUS_NOT_SUPPORTED;
2776 /* Find which qid-index is associated with the queue */
2777 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2778 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2781 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2783 if (rc == ECORE_SUCCESS)
2784 status = PFVF_STATUS_SUCCESS;
2787 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2791 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2792 struct ecore_ptt *p_ptt,
2793 struct ecore_vf_info *vf)
2795 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2796 u16 length = sizeof(struct pfvf_def_resp_tlv);
2797 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2798 struct vfpf_update_rxq_tlv *req;
2799 u8 status = PFVF_STATUS_FAILURE;
2800 u8 complete_event_flg;
2801 u8 complete_cqe_flg;
2803 enum _ecore_status_t rc;
2806 req = &mbx->req_virt->update_rxq;
2807 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2808 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2810 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2811 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2814 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2815 * expecting a single queue at a time. Validate this.
2817 if ((vf->acquire.vfdev_info.capabilities &
2818 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2819 req->num_rxqs != 1) {
2820 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2821 "VF[%d] supports QIDs but sends multiple queues\n",
2822 vf->relative_vf_id);
2826 /* Validate inputs - for the legacy case this is still true since
2827 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2829 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2830 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2831 ECORE_IOV_VALIDATE_Q_NA) ||
2832 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2833 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2834 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2835 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2836 vf->relative_vf_id, req->rx_qid,
2842 for (i = 0; i < req->num_rxqs; i++) {
2843 u16 qid = req->rx_qid + i;
2845 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2848 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2852 ECORE_SPQ_MODE_EBLOCK,
2854 if (rc != ECORE_SUCCESS)
2857 status = PFVF_STATUS_SUCCESS;
2859 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2863 static enum _ecore_status_t
2864 ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
2865 struct ecore_ptt *p_ptt,
2866 struct ecore_vf_info *p_vf)
2868 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2869 struct ecore_sp_vport_update_params params;
2870 enum _ecore_status_t rc = ECORE_SUCCESS;
2871 struct vfpf_update_mtu_tlv *p_req;
2872 u8 status = PFVF_STATUS_SUCCESS;
2874 /* Validate that the VF can send such a request */
2875 if (!p_vf->vport_instance) {
2876 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2877 "No VPORT instance available for VF[%d], failing MTU update\n",
2879 status = PFVF_STATUS_FAILURE;
2883 p_req = &mbx->req_virt->update_mtu;
2885 OSAL_MEMSET(&params, 0, sizeof(params));
2886 params.opaque_fid = p_vf->opaque_fid;
2887 params.vport_id = p_vf->vport_id;
2888 params.mtu = p_req->mtu;
2889 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
2893 status = PFVF_STATUS_FAILURE;
2895 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
2896 CHANNEL_TLV_UPDATE_MTU,
2897 sizeof(struct pfvf_def_resp_tlv),
2902 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2903 void *p_tlvs_list, u16 req_type)
2905 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2909 if (!p_tlv->length) {
2910 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2914 if (p_tlv->type == req_type) {
2915 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2916 "Extended tlv type %s, length %d found\n",
2917 ecore_channel_tlvs_string[p_tlv->type],
2922 len += p_tlv->length;
2923 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2925 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2926 DP_NOTICE(p_hwfn, true,
2927 "TLVs has overrun the buffer size\n");
2930 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
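/* Illustrative sketch (not part of the driver): the search above walks
 * a packed TLV chain - match on type, advance by each TLV's length,
 * stop at LIST_END, and bail out on a zero-length TLV or on running
 * past the buffer. A self-contained model with hypothetical types and
 * sizes:
 */
#if 0
#define SKETCH_TLV_LIST_END 0
#define SKETCH_TLV_BUF_SIZE 1024

struct sketch_tlv {
	u16 type;
	u16 length; /* covers the header as well as the payload */
};

static struct sketch_tlv *sketch_find_tlv(u8 *buf, u16 wanted_type)
{
	struct sketch_tlv *tlv = (struct sketch_tlv *)buf;
	u16 consumed = 0;

	while (tlv->type != SKETCH_TLV_LIST_END) {
		if (!tlv->length)
			return OSAL_NULL; /* malformed chain */
		if (tlv->type == wanted_type)
			return tlv;
		consumed += tlv->length;
		if (consumed + sizeof(struct sketch_tlv) > SKETCH_TLV_BUF_SIZE)
			return OSAL_NULL; /* would overrun the buffer */
		tlv = (struct sketch_tlv *)((u8 *)tlv + tlv->length);
	}
	return OSAL_NULL;
}
#endif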
2936 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2937 struct ecore_sp_vport_update_params *p_data,
2938 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2940 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2941 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2943 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2944 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2948 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2949 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2950 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2951 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2952 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2956 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2957 struct ecore_sp_vport_update_params *p_data,
2958 struct ecore_vf_info *p_vf,
2959 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2961 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2962 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2964 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2965 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2969 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2971 /* Ignore the VF request if we're forcing a vlan */
2972 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2973 p_data->update_inner_vlan_removal_flg = 1;
2974 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2977 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2981 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2982 struct ecore_sp_vport_update_params *p_data,
2983 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2985 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2986 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2988 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2989 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2990 if (!p_tx_switch_tlv)
2994 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2995 DP_NOTICE(p_hwfn, false,
2996 "FPGA: Ignore tx-switching configuration originating"
3002 p_data->update_tx_switching_flg = 1;
3003 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
3004 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
3008 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
3009 struct ecore_sp_vport_update_params *p_data,
3010 struct ecore_iov_vf_mbx *p_mbx,
3013 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
3014 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
3016 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
3017 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3021 p_data->update_approx_mcast_flg = 1;
3022 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
3023 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
3024 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
3028 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
3029 struct ecore_sp_vport_update_params *p_data,
3030 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3032 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
3033 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
3034 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
3036 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
3037 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3041 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
3042 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
3043 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
3044 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
3045 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
3049 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
3050 struct ecore_sp_vport_update_params *p_data,
3051 struct ecore_iov_vf_mbx *p_mbx,
3054 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3055 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3057 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3058 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3059 if (!p_accept_any_vlan)
3062 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3063 p_data->update_accept_any_vlan_flg =
3064 p_accept_any_vlan->update_accept_any_vlan_flg;
3065 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3069 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3070 struct ecore_vf_info *vf,
3071 struct ecore_sp_vport_update_params *p_data,
3072 struct ecore_rss_params *p_rss,
3073 struct ecore_iov_vf_mbx *p_mbx,
3074 u16 *tlvs_mask, u16 *tlvs_accepted)
3076 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3077 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3078 bool b_reject = false;
3082 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3083 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3085 p_data->rss_params = OSAL_NULL;
3089 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3091 p_rss->update_rss_config =
3092 !!(p_rss_tlv->update_rss_flags &
3093 VFPF_UPDATE_RSS_CONFIG_FLAG);
3094 p_rss->update_rss_capabilities =
3095 !!(p_rss_tlv->update_rss_flags &
3096 VFPF_UPDATE_RSS_CAPS_FLAG);
3097 p_rss->update_rss_ind_table =
3098 !!(p_rss_tlv->update_rss_flags &
3099 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3100 p_rss->update_rss_key =
3101 !!(p_rss_tlv->update_rss_flags &
3102 VFPF_UPDATE_RSS_KEY_FLAG);
3104 p_rss->rss_enable = p_rss_tlv->rss_enable;
3105 p_rss->rss_eng_id = vf->rss_eng_id;
3106 p_rss->rss_caps = p_rss_tlv->rss_caps;
3107 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3108 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3109 sizeof(p_rss->rss_key));
3111 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3112 (1 << p_rss_tlv->rss_table_size_log));
3114 for (i = 0; i < table_size; i++) {
3115 struct ecore_queue_cid *p_cid;
3117 q_idx = p_rss_tlv->rss_ind_table[i];
3118 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3119 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3120 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3121 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3122 vf->relative_vf_id, q_idx);
3127 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3128 p_rss->rss_ind_table[i] = p_cid;
3131 p_data->rss_params = p_rss;
3133 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3135 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
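/* Illustrative sketch (not part of the driver): RSS differs from the
 * other vport-update TLVs in that it can be present ('tlvs_mask') yet
 * rejected ('tlvs_accepted'), which lets the PF report exactly which
 * parts of the request it applied. A model of the two-mask accounting:
 */
#if 0
static void sketch_account_tlv(u16 *tlvs_mask, u16 *tlvs_accepted,
			       int tlv_bit, bool b_accepted)
{
	*tlvs_mask |= 1 << tlv_bit;		/* present in the request */
	if (b_accepted)
		*tlvs_accepted |= 1 << tlv_bit;	/* and actually applied */
}
#endif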
3139 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3140 struct ecore_sp_vport_update_params *p_data,
3141 struct ecore_sge_tpa_params *p_sge_tpa,
3142 struct ecore_iov_vf_mbx *p_mbx,
3145 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3146 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3148 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3149 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3151 if (!p_sge_tpa_tlv) {
3152 p_data->sge_tpa_params = OSAL_NULL;
3156 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3158 p_sge_tpa->update_tpa_en_flg =
3159 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3160 p_sge_tpa->update_tpa_param_flg =
3161 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3162 VFPF_UPDATE_TPA_PARAM_FLAG);
3164 p_sge_tpa->tpa_ipv4_en_flg =
3165 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3166 p_sge_tpa->tpa_ipv6_en_flg =
3167 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3168 p_sge_tpa->tpa_pkt_split_flg =
3169 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3170 p_sge_tpa->tpa_hdr_data_split_flg =
3171 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3172 p_sge_tpa->tpa_gro_consistent_flg =
3173 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3175 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3176 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3177 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3178 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3179 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3181 p_data->sge_tpa_params = p_sge_tpa;
3183 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3186 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3187 struct ecore_ptt *p_ptt,
3188 struct ecore_vf_info *vf)
3190 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3191 struct ecore_sp_vport_update_params params;
3192 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3193 struct ecore_sge_tpa_params sge_tpa_params;
3194 u16 tlvs_mask = 0, tlvs_accepted = 0;
3195 u8 status = PFVF_STATUS_SUCCESS;
3197 enum _ecore_status_t rc;
3199 /* Validate that the VF can send such a request */
3200 if (!vf->vport_instance) {
3201 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3202 "No VPORT instance available for VF[%d],"
3203 " failing vport update\n",
3205 status = PFVF_STATUS_FAILURE;
3209 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3210 if (p_rss_params == OSAL_NULL) {
3211 status = PFVF_STATUS_FAILURE;
3215 OSAL_MEMSET(&params, 0, sizeof(params));
3216 params.opaque_fid = vf->opaque_fid;
3217 params.vport_id = vf->vport_id;
3218 params.rss_params = OSAL_NULL;
3220 /* Search for extended tlvs list and update values
3221 * from VF in struct ecore_sp_vport_update_params.
3223 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3224 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3225 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3226 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3227 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3228 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3229 ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
3230 &sge_tpa_params, mbx, &tlvs_mask);
3232 tlvs_accepted = tlvs_mask;
3234 /* Some of the extended TLVs need to be validated first; In that case,
3235 * they can update the mask without updating the accepted [so that
3236 * PF could communicate to VF it has rejected request].
3238 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3239 mbx, &tlvs_mask, &tlvs_accepted);
3241 /* Just log a message if the buffer holds no extended TLV at all.
3242 * Once every feature of the vport update ramrod is requested by
3243 * the VF as an extended TLV, an error can be returned in the
3244 * response if no extended TLV is present in the buffer.
3246 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3247 &params, &tlvs_accepted) !=
3250 status = PFVF_STATUS_NOT_SUPPORTED;
3254 if (!tlvs_accepted) {
3256 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3257 "Upper-layer prevents said VF"
3258 " configuration\n");
3260 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3261 "No feature tlvs found for vport update\n");
3262 status = PFVF_STATUS_NOT_SUPPORTED;
3266 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3270 status = PFVF_STATUS_FAILURE;
3273 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3274 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3275 tlvs_mask, tlvs_accepted);
3276 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3279 static enum _ecore_status_t
3280 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3281 struct ecore_vf_info *p_vf,
3282 struct ecore_filter_ucast *p_params)
3286 /* First remove entries and then add new ones */
3287 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3288 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3289 if (p_vf->shadow_config.vlans[i].used &&
3290 p_vf->shadow_config.vlans[i].vid ==
3292 p_vf->shadow_config.vlans[i].used = false;
3295 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3296 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3297 "VF [%d] - Tries to remove a non-existing"
3299 p_vf->relative_vf_id);
3302 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3303 p_params->opcode == ECORE_FILTER_FLUSH) {
3304 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3305 p_vf->shadow_config.vlans[i].used = false;
3308 /* In forced mode, we're willing to remove entries - but we don't add
3311 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3312 return ECORE_SUCCESS;
3314 if (p_params->opcode == ECORE_FILTER_ADD ||
3315 p_params->opcode == ECORE_FILTER_REPLACE) {
3316 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3317 if (p_vf->shadow_config.vlans[i].used)
3320 p_vf->shadow_config.vlans[i].used = true;
3321 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3325 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3326 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3327 "VF [%d] - Tries to configure more than %d"
3329 p_vf->relative_vf_id,
3330 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3335 return ECORE_SUCCESS;
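/* Illustrative sketch (not part of the driver): the shadow VLAN table
 * above is a fixed array of {used, vid} slots - remove clears a
 * matching slot, add claims the first free one, and walking off the
 * end means the VF exceeded its filter quota. A compact model with a
 * hypothetical quota:
 */
#if 0
#define SKETCH_NUM_VLAN_SLOTS 8

struct sketch_vlan_slot {
	bool used;
	u16 vid;
};

static int sketch_vlan_add(struct sketch_vlan_slot *slots, u16 vid)
{
	int i;

	for (i = 0; i < SKETCH_NUM_VLAN_SLOTS; i++) {
		if (slots[i].used)
			continue;
		slots[i].used = true;
		slots[i].vid = vid;
		return 0;	/* claimed a free slot */
	}
	return -1;		/* quota exceeded */
}
#endif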
3338 static enum _ecore_status_t
3339 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3340 struct ecore_vf_info *p_vf,
3341 struct ecore_filter_ucast *p_params)
3343 char empty_mac[ETH_ALEN];
3346 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3348 /* If we're in forced-mode, we don't allow any change */
3349 /* TODO - this would change if we were ever to implement logic for
3350 * removing a forced MAC altogether [in which case, like for vlans,
3351 * we should be able to re-trace the previous configuration].
3353 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3354 return ECORE_SUCCESS;
3356 /* First remove entries and then add new ones */
3357 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3358 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3359 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3360 p_params->mac, ETH_ALEN)) {
3361 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3367 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3368 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3369 "MAC isn't configured\n");
3372 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3373 p_params->opcode == ECORE_FILTER_FLUSH) {
3374 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3375 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3378 /* List the new MAC address */
3379 if (p_params->opcode != ECORE_FILTER_ADD &&
3380 p_params->opcode != ECORE_FILTER_REPLACE)
3381 return ECORE_SUCCESS;
3383 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3384 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3385 empty_mac, ETH_ALEN)) {
3386 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3387 p_params->mac, ETH_ALEN);
3388 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3389 "Added MAC at %d entry in shadow\n", i);
3394 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3395 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3396 "No available place for MAC\n");
3400 return ECORE_SUCCESS;
3403 static enum _ecore_status_t
3404 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3405 struct ecore_vf_info *p_vf,
3406 struct ecore_filter_ucast *p_params)
3408 enum _ecore_status_t rc = ECORE_SUCCESS;
3410 if (p_params->type == ECORE_FILTER_MAC) {
3411 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3412 if (rc != ECORE_SUCCESS)
3416 if (p_params->type == ECORE_FILTER_VLAN)
3417 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3422 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3423 struct ecore_ptt *p_ptt,
3424 struct ecore_vf_info *vf)
3426 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3427 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3428 struct vfpf_ucast_filter_tlv *req;
3429 u8 status = PFVF_STATUS_SUCCESS;
3430 struct ecore_filter_ucast params;
3431 enum _ecore_status_t rc;
3433 /* Prepare the unicast filter params */
3434 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3435 req = &mbx->req_virt->ucast_filter;
3436 params.opcode = (enum ecore_filter_opcode)req->opcode;
3437 params.type = (enum ecore_filter_ucast_type)req->type;
3439 /* @@@TBD - We might need logic on the HV side to determine this */
3440 params.is_rx_filter = 1;
3441 params.is_tx_filter = 1;
3442 params.vport_to_remove_from = vf->vport_id;
3443 params.vport_to_add_to = vf->vport_id;
3444 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3445 params.vlan = req->vlan;
3447 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3448 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3449 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3450 vf->abs_vf_id, params.opcode, params.type,
3451 params.is_rx_filter ? "RX" : "",
3452 params.is_tx_filter ? "TX" : "",
3453 params.vport_to_add_to,
3454 params.mac[0], params.mac[1], params.mac[2],
3455 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3457 if (!vf->vport_instance) {
3458 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3459 "No VPORT instance available for VF[%d],"
3460 " failing ucast MAC configuration\n",
3462 status = PFVF_STATUS_FAILURE;
3466 /* Update the shadow copy of the VF configuration. In case the
3467 * shadow indicates the action should be blocked, return success
3468 * to the VF to imitate the firmware behaviour in such a case.
3470 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3474 /* Determine if the unicast filtering is acceptable to the PF */
3475 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3476 (params.type == ECORE_FILTER_VLAN ||
3477 params.type == ECORE_FILTER_MAC_VLAN)) {
3478 /* Once VLAN is forced or PVID is set, do not allow
3479 * to add/replace any further VLANs.
3481 if (params.opcode == ECORE_FILTER_ADD ||
3482 params.opcode == ECORE_FILTER_REPLACE)
3483 status = PFVF_STATUS_FORCED;
3487 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3488 (params.type == ECORE_FILTER_MAC ||
3489 params.type == ECORE_FILTER_MAC_VLAN)) {
3490 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3491 (params.opcode != ECORE_FILTER_ADD &&
3492 params.opcode != ECORE_FILTER_REPLACE))
3493 status = PFVF_STATUS_FORCED;
3497 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3498 if (rc == ECORE_EXISTS) {
3500 } else if (rc == ECORE_INVAL) {
3501 status = PFVF_STATUS_FAILURE;
3505 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3506 ECORE_SPQ_MODE_CB, OSAL_NULL);
3508 status = PFVF_STATUS_FAILURE;
3511 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3512 sizeof(struct pfvf_def_resp_tlv), status);
3515 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3516 struct ecore_ptt *p_ptt,
3517 struct ecore_vf_info *vf)
3522 for (i = 0; i < vf->num_sbs; i++)
3523 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3525 vf->opaque_fid, false);
3527 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3528 sizeof(struct pfvf_def_resp_tlv),
3529 PFVF_STATUS_SUCCESS);
3532 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3533 struct ecore_ptt *p_ptt,
3534 struct ecore_vf_info *vf)
3536 u16 length = sizeof(struct pfvf_def_resp_tlv);
3537 u8 status = PFVF_STATUS_SUCCESS;
3539 /* Disable Interrupts for VF */
3540 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3542 /* Reset Permission table */
3543 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3545 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3549 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3550 struct ecore_ptt *p_ptt,
3551 struct ecore_vf_info *p_vf)
3553 u16 length = sizeof(struct pfvf_def_resp_tlv);
3554 u8 status = PFVF_STATUS_SUCCESS;
3555 enum _ecore_status_t rc = ECORE_SUCCESS;
3557 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3559 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3560 /* Stopping the VF */
3561 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3564 if (rc != ECORE_SUCCESS) {
3565 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3567 status = PFVF_STATUS_FAILURE;
3570 p_vf->state = VF_STOPPED;
3573 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3577 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3578 struct ecore_ptt *p_ptt,
3579 struct ecore_vf_info *p_vf)
3581 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3582 struct pfvf_read_coal_resp_tlv *p_resp;
3583 struct vfpf_read_coal_req_tlv *req;
3584 u8 status = PFVF_STATUS_FAILURE;
3585 struct ecore_vf_queue *p_queue;
3586 struct ecore_queue_cid *p_cid;
3587 enum _ecore_status_t rc = ECORE_SUCCESS;
3588 u16 coal = 0, qid, i;
3591 mbx->offset = (u8 *)mbx->reply_virt;
3592 req = &mbx->req_virt->read_coal_req;
3595 b_is_rx = req->is_rx ? true : false;
3598 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3599 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3600 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3601 "VF[%d]: Invalid Rx queue_id = %d\n",
3602 p_vf->abs_vf_id, qid);
3606 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3607 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3608 if (rc != ECORE_SUCCESS)
3611 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3612 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3613 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3614 "VF[%d]: Invalid Tx queue_id = %d\n",
3615 p_vf->abs_vf_id, qid);
3618 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3619 p_queue = &p_vf->vf_queues[qid];
3620 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3621 (!p_queue->cids[i].b_is_tx))
3624 p_cid = p_queue->cids[i].p_cid;
3626 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3628 if (rc != ECORE_SUCCESS)
3634 status = PFVF_STATUS_SUCCESS;
3637 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3639 p_resp->coal = coal;
3641 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3642 sizeof(struct channel_list_end_tlv));
3644 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3647 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3648 struct ecore_ptt *p_ptt,
3649 struct ecore_vf_info *vf)
3651 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3652 enum _ecore_status_t rc = ECORE_SUCCESS;
3653 struct vfpf_update_coalesce *req;
3654 u8 status = PFVF_STATUS_FAILURE;
3655 struct ecore_queue_cid *p_cid;
3656 u16 rx_coal, tx_coal;
3660 req = &mbx->req_virt->update_coalesce;
3662 rx_coal = req->rx_coal;
3663 tx_coal = req->tx_coal;
3666 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3667 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3669 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3670 vf->abs_vf_id, qid);
3674 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3675 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3677 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3678 vf->abs_vf_id, qid);
3682 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3683 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3684 vf->abs_vf_id, rx_coal, tx_coal, qid);
3687 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3689 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3690 if (rc != ECORE_SUCCESS) {
3691 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3692 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3693 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3696 vf->rx_coal = rx_coal;
3699 /* TODO - in future, it might be possible to pass this in a per-cid
3700 * granularity. For now, do this for all Tx queues.
3703 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3705 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3706 if (p_queue->cids[i].p_cid == OSAL_NULL)
3709 if (!p_queue->cids[i].b_is_tx)
3712 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3713 p_queue->cids[i].p_cid);
3714 if (rc != ECORE_SUCCESS) {
3715 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3716 "VF[%d]: Unable to set tx queue coalesce\n",
3721 vf->tx_coal = tx_coal;
3724 status = PFVF_STATUS_SUCCESS;
3726 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3727 sizeof(struct pfvf_def_resp_tlv), status);
3730 enum _ecore_status_t
3731 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3732 u16 rx_coal, u16 tx_coal,
3735 struct ecore_queue_cid *p_cid;
3736 struct ecore_vf_info *vf;
3737 struct ecore_ptt *p_ptt;
3740 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3741 DP_NOTICE(p_hwfn, true,
3742 "VF[%d] - Can not set coalescing: VF is not active\n",
3747 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3748 p_ptt = ecore_ptt_acquire(p_hwfn);
3752 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3753 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3755 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3756 vf->abs_vf_id, qid);
3760 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3761 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3763 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3764 vf->abs_vf_id, qid);
3768 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3769 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3770 vf->abs_vf_id, rx_coal, tx_coal, qid);
3773 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3775 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3776 if (rc != ECORE_SUCCESS) {
3777 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3778 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3779 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3782 vf->rx_coal = rx_coal;
3785 /* TODO - in future, it might be possible to pass this in a per-cid
3786 * granularity. For now, do this for all Tx queues.
3789 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3791 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3792 if (p_queue->cids[i].p_cid == OSAL_NULL)
3795 if (!p_queue->cids[i].b_is_tx)
3798 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3799 p_queue->cids[i].p_cid);
3800 if (rc != ECORE_SUCCESS) {
3801 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3802 "VF[%d]: Unable to set tx queue coalesce\n",
3807 vf->tx_coal = tx_coal;
3811 ecore_ptt_release(p_hwfn, p_ptt);
3816 static enum _ecore_status_t
3817 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3818 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3823 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3825 for (cnt = 0; cnt < 50; cnt++) {
3826 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3831 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3835 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3836 p_vf->abs_vf_id, val);
3837 return ECORE_TIMEOUT;
3840 return ECORE_SUCCESS;
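/* Illustrative sketch (not part of the driver): the DORQ poll above
 * pretends to the VF's function id, repeatedly reads a usage counter
 * until it drains or the retry budget runs out, and then restores the
 * PF pretend. The bounded-poll skeleton, with hypothetical callbacks:
 */
#if 0
static int sketch_poll_until_zero(u32 (*read_cnt)(void *ctx), void *ctx,
				  int max_tries)
{
	int cnt;

	for (cnt = 0; cnt < max_tries; cnt++) {
		if (!read_cnt(ctx))
			return 0;	/* counter drained */
		/* a real implementation would delay between reads */
	}
	return -1;			/* timed out */
}
#endif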
3843 static enum _ecore_status_t
3844 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3845 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3847 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3850 /* Read initial consumers & producers */
3851 for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3854 cons[i] = ecore_rd(p_hwfn, p_ptt,
3855 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3857 prod = ecore_rd(p_hwfn, p_ptt,
3858 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3860 distance[i] = prod - cons[i];
3863 /* Wait for consumers to pass the producers */
3865 for (cnt = 0; cnt < 50; cnt++) {
3866 for (; i < MAX_NUM_VOQS_E4; i++) {
3869 tmp = ecore_rd(p_hwfn, p_ptt,
3870 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3872 if (distance[i] > tmp - cons[i])
3876 if (i == MAX_NUM_VOQS_E4)
3883 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3884 p_vf->abs_vf_id, i);
3885 return ECORE_TIMEOUT;
3888 return ECORE_SUCCESS;
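/* Illustrative sketch (not part of the driver): the PBF poll above
 * snapshots producer/consumer pairs per VOQ, records their initial
 * distance, and waits until every consumer has advanced by at least
 * that distance. Unsigned subtraction keeps the comparison safe even
 * if the counters wrap:
 */
#if 0
static bool sketch_voq_drained(u32 cons_start, u32 cons_now, u32 distance)
{
	/* (cons_now - cons_start) is wrap-safe for u32 counters */
	return (cons_now - cons_start) >= distance;
}
#endif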
3891 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3892 struct ecore_vf_info *p_vf,
3893 struct ecore_ptt *p_ptt)
3895 enum _ecore_status_t rc;
3897 /* TODO - add SRC and TM polling once we add storage IOV */
3899 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3903 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3907 return ECORE_SUCCESS;
3910 static enum _ecore_status_t
3911 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3912 struct ecore_ptt *p_ptt,
3913 u16 rel_vf_id, u32 *ack_vfs)
3915 struct ecore_vf_info *p_vf;
3916 enum _ecore_status_t rc = ECORE_SUCCESS;
3918 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3920 return ECORE_SUCCESS;
3922 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3923 (1ULL << (rel_vf_id % 64))) {
3924 u16 vfid = p_vf->abs_vf_id;
3926 /* TODO - should we lock channel? */
3928 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3929 "VF[%d] - Handling FLR\n", vfid);
3931 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3933 /* If VF isn't active, no need for anything but SW */
3937 /* TODO - what to do in case of failure? */
3938 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3939 if (rc != ECORE_SUCCESS)
3942 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3944 /* TODO - what now? What a mess... */
3945 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3949 /* Workaround to make VF-PF channel ready, as FW
3950 * doesn't do that as a part of FLR.
3953 GTT_BAR0_MAP_REG_USDM_RAM +
3954 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3956 /* VF_STOPPED has to be set only after final cleanup
3957 * but prior to re-enabling the VF.
3959 p_vf->state = VF_STOPPED;
3961 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3963 /* TODO - again, a mess... */
3964 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3969 /* Mark VF for ack and clean pending state */
3970 if (p_vf->state == VF_RESET)
3971 p_vf->state = VF_STOPPED;
3972 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3973 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3974 ~(1ULL << (rel_vf_id % 64));
3975 p_vf->vf_mbx.b_pending_msg = false;
3981 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3982 struct ecore_ptt *p_ptt)
3984 u32 ack_vfs[VF_MAX_STATIC / 32];
3985 enum _ecore_status_t rc = ECORE_SUCCESS;
3988 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3990 /* Since BRB <-> PRS interface can't be tested as part of the flr
3991 * polling due to HW limitations, simply sleep a bit. And since
3992 * there's no need to wait per-vf, do it before looping.
3996 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3997 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3999 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4003 enum _ecore_status_t
4004 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4005 struct ecore_ptt *p_ptt, u16 rel_vf_id)
4007 u32 ack_vfs[VF_MAX_STATIC / 32];
4008 enum _ecore_status_t rc = ECORE_SUCCESS;
4010 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
4012 /* Wait instead of polling the BRB <-> PRS interface */
4015 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
4017 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4021 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
4026 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
4027 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
4028 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4029 "[%08x,...,%08x]: %08x\n",
4030 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
4032 if (!p_hwfn->p_dev->p_iov_info) {
4033 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
4038 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
4039 struct ecore_vf_info *p_vf;
4042 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
4046 vfid = p_vf->abs_vf_id;
4047 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
4048 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
4049 u16 rel_vf_id = p_vf->relative_vf_id;
4051 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4052 "VF[%d] [rel %d] got FLR-ed\n",
4055 p_vf->state = VF_RESET;
4057 /* No need to lock here, since pending_flr should
4058 * only change here and before ACKing the MFW. Since
4059 * the MFW will not trigger an additional attention for
4060 * a VF FLR until we ACK, we're safe.
4062 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
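/* Illustrative sketch (not part of the driver): FLR state lives in two
 * plain bitmaps - 'pending_flr' indexed by relative VF id in u64 words
 * (as set just above), and the MFW ack array indexed by absolute VF id
 * in u32 words. The word/bit split used throughout this file:
 */
#if 0
static void sketch_mark_pending(u64 *pending_flr, u16 rel_vf_id)
{
	pending_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
}

static void sketch_ack_and_clear(u32 *ack_vfs, u64 *pending_flr,
				 u16 abs_vf_id, u16 rel_vf_id)
{
	ack_vfs[abs_vf_id / 32] |= 1U << (abs_vf_id % 32);
	pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64));
}
#endif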
4070 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
4072 struct ecore_mcp_link_params *p_params,
4073 struct ecore_mcp_link_state *p_link,
4074 struct ecore_mcp_link_capabilities *p_caps)
4076 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
4077 struct ecore_bulletin_content *p_bulletin;
4082 p_bulletin = p_vf->bulletin.p_virt;
4085 __ecore_vf_get_link_params(p_params, p_bulletin);
4087 __ecore_vf_get_link_state(p_link, p_bulletin);
4089 __ecore_vf_get_link_caps(p_caps, p_bulletin);
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, int vfid)
{
	struct ecore_iov_vf_mbx *mbx;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* ecore_iov_process_mbx_request */
#ifndef CONFIG_ECORE_SW_CHANNEL
	if (!mbx->b_pending_msg) {
		DP_NOTICE(p_hwfn, true,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	mbx->b_pending_msg = false;
#endif

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
			     p_vf->relative_vf_id,
			     mbx->first_tlv.tl.type);

	/* Lock the per-VF op mutex and note the locker's identity.
	 * The unlock will take place in the mbx response.
	 */
	ecore_iov_lock_vf_pf_channel(p_hwfn,
				     p_vf, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
			ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_UPDATE:
			ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_READ:
			ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_MTU:
			ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* If we've received a message from a VF we consider malicious
		 * we ignore the message unless it's one for RELEASE, in which
		 * case we'll let it have the benefit of the doubt, allowing
		 * the next loaded driver to start again.
		 */
		if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
			/* TODO - initiate FLR, remove malicious indication */
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
				   p_vf->abs_vf_id);
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
				   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
		}

		ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				       mbx->first_tlv.tl.type,
				       sizeof(struct pfvf_def_resp_tlv),
				       PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn, false,
			  "VF[%02x]: unknown TLV. type %04x length %04x"
			  " padding %08x reply address %lu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding,
			  (unsigned long)mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address))
			ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					       mbx->first_tlv.tl.type,
					       sizeof(struct pfvf_def_resp_tlv),
					       PFVF_STATUS_NOT_SUPPORTED);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV -"
				   " no valid reply address\n",
				   p_vf->abs_vf_id);
	}

	ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
				       mbx->first_tlv.tl.type);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
	mbx->sw_mbx.response_offset = 0;
#endif
}

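/* Extension sketch (hypothetical names): supporting an additional TLV
 * would amount to one more case in the dispatch above, e.g.
 *
 *	case CHANNEL_TLV_FOO:
 *		ecore_iov_vf_mbx_foo(p_hwfn, p_ptt, p_vf);
 *		break;
 *
 * where CHANNEL_TLV_FOO and ecore_iov_vf_mbx_foo() are illustrative only;
 * a matching entry in the TLV strings table keeps the logging readable.
 */
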
void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
				     u64 *events)
{
	u16 i;

	OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);

	ecore_for_each_vf(p_hwfn, i) {
		struct ecore_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

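/* Usage sketch (illustrative; not part of the driver, and assuming the
 * caller holds a p_ptt): a PF slowpath task could drain pending mailbox
 * messages by pairing this helper with ecore_iov_process_mbx_req():
 *
 *	u64 events[ECORE_VF_ARRAY_LENGTH];
 *	u16 i;
 *
 *	ecore_iov_pf_get_pending_events(p_hwfn, events);
 *	ecore_for_each_vf(p_hwfn, i)
 *		if (events[i / 64] & (1ULL << (i % 64)))
 *			ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
 */
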
static struct ecore_vf_info *
ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be"
			   " handled by PF\n",
			   abs_vfid);
		return OSAL_NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}

static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 abs_vfid,
						 struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
								   abs_vfid);

	if (!p_vf)
		return ECORE_SUCCESS;

	/* Latch the physical address of the request so that the handler
	 * can later copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	p_vf->vf_mbx.b_pending_msg = true;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}

static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
				       struct malicious_vf_eqe_data *p_data)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn, false,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}

	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}

static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 OSAL_UNUSED fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	case COMMON_EVENT_MALICIOUS_VF:
		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}

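/* Note: for COMMON_EVENT_VF_PF_CHANNEL the EQE echo field carries the
 * VF's absolute id, which ecore_sriov_get_vf_from_absid() maps back to a
 * vfs_array slot via first_vf_in_pf. This callback is expected to be
 * registered for PROTOCOLID_COMMON completions during IOV setup.
 */
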
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}

u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	/* Check each VF from rel_vf_id onwards, not just the first one */
	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return MAX_NUM_VFS_E4;
}

enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}

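/* Note: ecore_dmae_host2host() takes its length in dwords, hence the
 * sizeof(union vfpf_tlvs) / 4 conversion. The copy pulls the request from
 * the guest address latched in pending_req into req_phys, whose virtual
 * mapping (req_virt) ecore_iov_process_mbx_req() then parses.
 */
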
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n",
			  vfid);
		return;
	}

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
		feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	else
		feature = 1 << MAC_ADDR_FORCED;

	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	/* Forced MAC will disable MAC_ADDR */
	if (!p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
		vf_info->bulletin.p_virt->valid_bitmap &=
		    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
		ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return ECORE_SUCCESS;
}

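/* Note: the two setters above publish different bulletin bits. A forced
 * MAC (MAC_ADDR_FORCED) overrides whatever the VF configured and blocks
 * further ecore_iov_bulletin_set_mac() calls, while the latter merely
 * publishes a suggested MAC via VFPF_BULLETIN_MAC_ADDR.
 */
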
#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
		  (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged =
	    b_untagged_only ? 1 : 0;

	return ECORE_SUCCESS;
}

void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}
#endif

void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced VLAN, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced vlan to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

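/* Note: pvid == 0 releases the forced VLAN - the bulletin bit is cleared,
 * and ecore_iov_configure_vport_forced() is still invoked, presumably so
 * the vport configuration drops the stale pvid as well.
 */
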
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port,
				      u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
						: ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}

void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}

#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif

bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}

u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
			       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap &
	      (1 << VFPF_BULLETIN_MAC_ADDR)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_mcp_link_state *p_link;
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
				   p_link->speed);
}

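/* Note: the TX rate cap is programmed as a per-vport rate limit; the
 * leading hwfn's current link speed is handed to ecore_init_vport_rl(),
 * presumably as the upper bound against which the limiter is scaled.
 */
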
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}

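/* Note: the statistics id above is offset by 0x10; VF statistics entries
 * appear to follow the 16 PF entries in the device's stats id space (an
 * assumption from the constant - the exact layout is defined by the HSI).
 */
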
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}

bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}

/* Returns the configured minimum rate rather than a status code */
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

#ifdef CONFIG_ECORE_SW_CHANNEL
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
				 bool b_is_hw)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	vf_info->b_hw_channel = b_is_hw;
}
#endif