2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
12 #include "ecore_sriov.h"
13 #include "ecore_status.h"
15 #include "ecore_hw_defs.h"
16 #include "ecore_int.h"
17 #include "ecore_hsi_eth.h"
19 #include "ecore_vfpf_if.h"
20 #include "ecore_rt_defs.h"
21 #include "ecore_init_ops.h"
22 #include "ecore_gtt_reg_addr.h"
23 #include "ecore_iro.h"
24 #include "ecore_mcp.h"
25 #include "ecore_cxt.h"
27 #include "ecore_init_fw_funcs.h"
28 #include "ecore_sp_commands.h"
30 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
33 union event_ring_data *data,
36 const char *ecore_channel_tlvs_string[] = {
37 "CHANNEL_TLV_NONE", /* ends tlv sequence */
38 "CHANNEL_TLV_ACQUIRE",
39 "CHANNEL_TLV_VPORT_START",
40 "CHANNEL_TLV_VPORT_UPDATE",
41 "CHANNEL_TLV_VPORT_TEARDOWN",
42 "CHANNEL_TLV_START_RXQ",
43 "CHANNEL_TLV_START_TXQ",
44 "CHANNEL_TLV_STOP_RXQ",
45 "CHANNEL_TLV_STOP_TXQ",
46 "CHANNEL_TLV_UPDATE_RXQ",
47 "CHANNEL_TLV_INT_CLEANUP",
49 "CHANNEL_TLV_RELEASE",
50 "CHANNEL_TLV_LIST_END",
51 "CHANNEL_TLV_UCAST_FILTER",
52 "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
53 "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
54 "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
55 "CHANNEL_TLV_VPORT_UPDATE_MCAST",
56 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
57 "CHANNEL_TLV_VPORT_UPDATE_RSS",
58 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
59 "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
60 "CHANNEL_TLV_UPDATE_TUNN_PARAM",
61 "CHANNEL_TLV_COALESCE_UPDATE",
63 "CHANNEL_TLV_COALESCE_READ",
67 static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
71 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
72 ETH_HSI_VER_NO_PKT_LEN_TUNN)
73 legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
75 if (!(p_vf->acquire.vfdev_info.capabilities &
76 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
77 legacy |= ECORE_QCID_LEGACY_VF_CID;
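/* Note (illustrative): a VF whose ACQUIRE negotiated the
 * ETH_HSI_VER_NO_PKT_LEN_TUNN minor and did not advertise
 * VFPF_ACQUIRE_CAP_QUEUE_QIDS would get both legacy bits set here,
 * i.e. ECORE_QCID_LEGACY_VF_RX_PROD | ECORE_QCID_LEGACY_VF_CID.
 */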
83 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
84 struct ecore_vf_info *p_vf)
86 struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
87 struct ecore_spq_entry *p_ent = OSAL_NULL;
88 struct ecore_sp_init_data init_data;
89 enum _ecore_status_t rc = ECORE_NOTIMPL;
93 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
94 init_data.cid = ecore_spq_get_cid(p_hwfn);
95 init_data.opaque_fid = p_vf->opaque_fid;
96 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
98 rc = ecore_sp_init_request(p_hwfn, &p_ent,
99 COMMON_RAMROD_VF_START,
100 PROTOCOLID_COMMON, &init_data);
101 if (rc != ECORE_SUCCESS)
104 p_ramrod = &p_ent->ramrod.vf_start;
106 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
107 p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
109 switch (p_hwfn->hw_info.personality) {
111 p_ramrod->personality = PERSONALITY_ETH;
113 case ECORE_PCI_ETH_ROCE:
114 case ECORE_PCI_ETH_IWARP:
115 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
118 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
119 p_hwfn->hw_info.personality);
123 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
124 if (fp_minor > ETH_HSI_VER_MINOR &&
125 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
126 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
127 "VF [%d] - Requested fp hsi %02x.%02x which is"
128 " slightly newer than PF's %02x.%02x; Configuring"
131 ETH_HSI_VER_MAJOR, fp_minor,
132 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
133 fp_minor = ETH_HSI_VER_MINOR;
136 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
137 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
139 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
140 "VF[%d] - Starting using HSI %02x.%02x\n",
141 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
143 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
146 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
150 struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
151 struct ecore_spq_entry *p_ent = OSAL_NULL;
152 struct ecore_sp_init_data init_data;
153 enum _ecore_status_t rc = ECORE_NOTIMPL;
156 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
157 init_data.cid = ecore_spq_get_cid(p_hwfn);
158 init_data.opaque_fid = opaque_vfid;
159 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
161 rc = ecore_sp_init_request(p_hwfn, &p_ent,
162 COMMON_RAMROD_VF_STOP,
163 PROTOCOLID_COMMON, &init_data);
164 if (rc != ECORE_SUCCESS)
167 p_ramrod = &p_ent->ramrod.vf_stop;
169 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
171 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
174 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
175 bool b_enabled_only, bool b_non_malicious)
177 if (!p_hwfn->pf_iov_info) {
178 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
182 if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
186 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
190 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
197 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
201 struct ecore_vf_info *vf = OSAL_NULL;
203 if (!p_hwfn->pf_iov_info) {
204 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
208 if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
209 b_enabled_only, false))
210 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
212 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
218 static struct ecore_queue_cid *
219 ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
223 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
224 if (p_queue->cids[i].p_cid &&
225 !p_queue->cids[i].b_is_tx)
226 return p_queue->cids[i].p_cid;
232 enum ecore_iov_validate_q_mode {
233 ECORE_IOV_VALIDATE_Q_NA,
234 ECORE_IOV_VALIDATE_Q_ENABLE,
235 ECORE_IOV_VALIDATE_Q_DISABLE,
238 static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
240 enum ecore_iov_validate_q_mode mode,
245 if (mode == ECORE_IOV_VALIDATE_Q_NA)
248 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
249 struct ecore_vf_queue_cid *p_qcid;
251 p_qcid = &p_vf->vf_queues[qid].cids[i];
253 if (p_qcid->p_cid == OSAL_NULL)
256 if (p_qcid->b_is_tx != b_is_tx)
259 /* Found. It's enabled. */
260 return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
264	/* If no valid cid was found, then it's disabled */
264 return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
267 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
268 struct ecore_vf_info *p_vf,
270 enum ecore_iov_validate_q_mode mode)
272 if (rx_qid >= p_vf->num_rxqs) {
273 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
274 "VF[0x%02x] - can't touch Rx queue[%04x];"
275 " Only 0x%04x are allocated\n",
276 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
280 return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
283 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
284 struct ecore_vf_info *p_vf,
286 enum ecore_iov_validate_q_mode mode)
288 if (tx_qid >= p_vf->num_txqs) {
289 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
290 "VF[0x%02x] - can't touch Tx queue[%04x];"
291 " Only 0x%04x are allocated\n",
292 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
296 return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
299 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
300 struct ecore_vf_info *p_vf,
305 for (i = 0; i < p_vf->num_sbs; i++)
306 if (p_vf->igu_sbs[i] == sb_idx)
309 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
310 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
311 " one of its 0x%02x SBs\n",
312 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
317 /* Is there at least 1 queue open? */
318 static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
322 for (i = 0; i < p_vf->num_rxqs; i++)
323 if (ecore_iov_validate_queue_mode(p_vf, i,
324 ECORE_IOV_VALIDATE_Q_ENABLE,
331 static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
335 for (i = 0; i < p_vf->num_txqs; i++)
336 if (ecore_iov_validate_queue_mode(p_vf, i,
337 ECORE_IOV_VALIDATE_Q_ENABLE,
344 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
346 struct ecore_ptt *p_ptt)
348 struct ecore_bulletin_content *p_bulletin;
349 int crc_size = sizeof(p_bulletin->crc);
350 struct ecore_dmae_params params;
351 struct ecore_vf_info *p_vf;
353 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
357 /* TODO - check VF is in a state where it can accept message */
358 if (!p_vf->vf_bulletin)
361 p_bulletin = p_vf->bulletin.p_virt;
363 /* Increment bulletin board version and compute crc */
364 p_bulletin->version++;
365 p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
366 p_vf->bulletin.size - crc_size);
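/* The CRC intentionally skips the crc field itself (the first crc_size
 * bytes), so the VF can recompute it over the same range when it
 * validates a freshly DMAed bulletin copy.
 */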
368 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
369 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
370 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
372 /* propagate bulletin board via dmae to vm memory */
373	OSAL_MEMSET(&params, 0, sizeof(params));
374 params.flags = ECORE_DMAE_FLAG_VF_DST;
375 params.dst_vfid = p_vf->abs_vf_id;
376 return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
377 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
381 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
383 struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
386 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
387 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
389 OSAL_PCI_READ_CONFIG_WORD(p_dev,
390 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
391 OSAL_PCI_READ_CONFIG_WORD(p_dev,
392 pos + PCI_SRIOV_INITIAL_VF,
395 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
397 /* @@@TODO - in future we might want to add an OSAL here to
398 * allow each OS to decide on its own how to act.
400 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
401 "Number of VFs are already set to non-zero value."
402 " Ignoring PCI configuration value\n");
406 OSAL_PCI_READ_CONFIG_WORD(p_dev,
407 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
409 OSAL_PCI_READ_CONFIG_WORD(p_dev,
410 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
412 OSAL_PCI_READ_CONFIG_WORD(p_dev,
413 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
415 OSAL_PCI_READ_CONFIG_DWORD(p_dev,
416 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
418 OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
420 OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
422 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
423 "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
424 " stride %d, page size 0x%x\n",
425 iov->nres, iov->cap, iov->ctrl,
426 iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
427 iov->offset, iov->stride, iov->pgsz);
429 /* Some sanity checks */
430 if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
431 iov->total_vfs > NUM_OF_VFS(p_dev)) {
432 /* This can happen only due to a bug. In this case we set
433 * num_vfs to zero to avoid memory corruption in the code that
434 * assumes max number of vfs
436 DP_NOTICE(p_dev, false,
437 "IOV: Unexpected number of vfs set: %d"
438 " setting num_vf to zero\n",
445 return ECORE_SUCCESS;
448 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
450 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
451 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
452 struct ecore_bulletin_content *p_bulletin_virt;
453 dma_addr_t req_p, rply_p, bulletin_p;
454 union pfvf_tlvs *p_reply_virt_addr;
455 union vfpf_tlvs *p_req_virt_addr;
458 OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
460 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
461 req_p = p_iov_info->mbx_msg_phys_addr;
462 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
463 rply_p = p_iov_info->mbx_reply_phys_addr;
464 p_bulletin_virt = p_iov_info->p_bulletins;
465 bulletin_p = p_iov_info->bulletins_phys;
466 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
468 "ecore_iov_setup_vfdb called without alloc mem first\n");
472 for (idx = 0; idx < p_iov->total_vfs; idx++) {
473 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
476 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
477 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
478 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
479 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
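/* Each VF owns one fixed-size slot inside the single coherent request
 * and reply allocations; VF idx uses the union vfpf_tlvs slot at
 * req_p + idx * sizeof(union vfpf_tlvs), mirrored for the replies.
 */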
481 #ifdef CONFIG_ECORE_SW_CHANNEL
482 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
483 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
485 vf->state = VF_STOPPED;
488 vf->bulletin.phys = idx *
489 sizeof(struct ecore_bulletin_content) + bulletin_p;
490 vf->bulletin.p_virt = p_bulletin_virt + idx;
491 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
493 vf->relative_vf_id = idx;
494 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
495 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
496 vf->concrete_fid = concrete;
497 /* TODO - need to devise a better way of getting opaque */
498 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
499 (vf->abs_vf_id << 8);
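/* Illustrative example: with a PF opaque_fid whose low byte is 0x00 and
 * abs_vf_id 0x12, the derived VF opaque_fid is 0x1200 - the low byte is
 * taken from the PF and the VF id is placed in bits [15:8].
 */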
501 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
502 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
506 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
508 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
512 num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
514 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
515 "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
517 /* Allocate PF Mailbox buffer (per-VF) */
518 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
519 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
520 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
521 &p_iov_info->mbx_msg_phys_addr,
522 p_iov_info->mbx_msg_size);
526 /* Allocate PF Mailbox Reply buffer (per-VF) */
527 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
528 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
529 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
530 &p_iov_info->mbx_reply_phys_addr,
531 p_iov_info->mbx_reply_size);
535 p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
537 p_v_addr = &p_iov_info->p_bulletins;
538 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
539 &p_iov_info->bulletins_phys,
540 p_iov_info->bulletins_size);
544 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
545 "PF's Requests mailbox [%p virt 0x%lx phys], "
546 "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
547 " [%p virt 0x%lx phys]\n",
548 p_iov_info->mbx_msg_virt_addr,
549 (unsigned long)p_iov_info->mbx_msg_phys_addr,
550 p_iov_info->mbx_reply_virt_addr,
551 (unsigned long)p_iov_info->mbx_reply_phys_addr,
552 p_iov_info->p_bulletins,
553 (unsigned long)p_iov_info->bulletins_phys);
555 return ECORE_SUCCESS;
558 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
560 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
562 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
563 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
564 p_iov_info->mbx_msg_virt_addr,
565 p_iov_info->mbx_msg_phys_addr,
566 p_iov_info->mbx_msg_size);
568 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
569 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
570 p_iov_info->mbx_reply_virt_addr,
571 p_iov_info->mbx_reply_phys_addr,
572 p_iov_info->mbx_reply_size);
574 if (p_iov_info->p_bulletins)
575 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
576 p_iov_info->p_bulletins,
577 p_iov_info->bulletins_phys,
578 p_iov_info->bulletins_size);
581 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
583 struct ecore_pf_iov *p_sriov;
585 if (!IS_PF_SRIOV(p_hwfn)) {
586 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
587 "No SR-IOV - no need for IOV db\n");
588 return ECORE_SUCCESS;
591 p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
593 DP_NOTICE(p_hwfn, true,
594 "Failed to allocate `struct ecore_sriov'\n");
598 p_hwfn->pf_iov_info = p_sriov;
600 ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
601 ecore_sriov_eqe_event);
603 return ecore_iov_allocate_vfdb(p_hwfn);
606 void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
608 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
611 ecore_iov_setup_vfdb(p_hwfn);
614 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
616 ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
618 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
619 ecore_iov_free_vfdb(p_hwfn);
620 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
624 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
626 OSAL_FREE(p_dev, p_dev->p_iov_info);
629 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
631 struct ecore_dev *p_dev = p_hwfn->p_dev;
633 enum _ecore_status_t rc;
635 if (IS_VF(p_hwfn->p_dev))
636 return ECORE_SUCCESS;
638 /* Learn the PCI configuration */
639 pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
640 PCI_EXT_CAP_ID_SRIOV);
642 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
643 return ECORE_SUCCESS;
646 /* Allocate a new struct for IOV information */
647 /* TODO - can change to VALLOC when its available */
648 p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
649 sizeof(*p_dev->p_iov_info));
650 if (!p_dev->p_iov_info) {
651 DP_NOTICE(p_hwfn, true,
652 "Can't support IOV due to lack of memory\n");
655 p_dev->p_iov_info->pos = pos;
657 rc = ecore_iov_pci_cfg_info(p_dev);
661	/* We want PF IOV to be synonymous with the existence of p_iov_info;
662 * In case the capability is published but there are no VFs, simply
663 * de-allocate the struct.
665 if (!p_dev->p_iov_info->total_vfs) {
666 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
667 "IOV capabilities, but no VFs are published\n");
668 OSAL_FREE(p_dev, p_dev->p_iov_info);
669 return ECORE_SUCCESS;
672 /* First VF index based on offset is tricky:
673 * - If ARI is supported [likely], offset - (16 - pf_id) would
674	 *     provide the number for eng0. 2nd engine VFs would begin
675 * after the first engine's VFs.
676 * - If !ARI, VFs would start on next device.
677 * so offset - (256 - pf_id) would provide the number.
678	 * Utilize the fact that (256 - pf_id) is reached only by the latter
679	 * to differentiate between the two.
682 if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
683 u32 first = p_hwfn->p_dev->p_iov_info->offset +
684 p_hwfn->abs_pf_id - 16;
686 p_dev->p_iov_info->first_vf_in_pf = first;
688 if (ECORE_PATH_ID(p_hwfn))
689 p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
691 u32 first = p_hwfn->p_dev->p_iov_info->offset +
692 p_hwfn->abs_pf_id - 256;
694 p_dev->p_iov_info->first_vf_in_pf = first;
697 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
698 "First VF in hwfn 0x%08x\n",
699 p_dev->p_iov_info->first_vf_in_pf);
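/* Worked example (assuming the common ARI layout): with offset 16 and
 * abs_pf_id 0, first_vf_in_pf = 16 + 0 - 16 = 0; a hwfn on the second
 * engine additionally subtracts MAX_NUM_VFS_BB.
 */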
701 return ECORE_SUCCESS;
704 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
705 bool b_fail_malicious)
707 /* Check PF supports sriov */
708 if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
709 !IS_PF_SRIOV_ALLOC(p_hwfn))
712 /* Check VF validity */
713 if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
719 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
721 return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
724 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
725 u16 rel_vf_id, u8 to_disable)
727 struct ecore_vf_info *vf;
730 for_each_hwfn(p_dev, i) {
731 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
733 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
737 vf->to_disable = to_disable;
741 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
746 if (!IS_ECORE_SRIOV(p_dev))
749 for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
750 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
754 /* @@@TBD Consider taking outside of ecore... */
755 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
759 enum _ecore_status_t rc = ECORE_SUCCESS;
760 struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
762 if (vf != OSAL_NULL) {
764 #ifdef CONFIG_ECORE_SW_CHANNEL
765 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
768 rc = ECORE_UNKNOWN_ERROR;
774 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
775 struct ecore_ptt *p_ptt,
778 ecore_wr(p_hwfn, p_ptt,
779 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
780 1 << (abs_vfid & 0x1f));
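/* The WAS_ERROR bits are packed 32 VFs per 32-bit register:
 * abs_vfid >> 5 selects the register and abs_vfid & 0x1f the bit,
 * e.g. abs_vfid 37 targets bit 5 of the second CLR register.
 */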
783 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
784 struct ecore_ptt *p_ptt,
785 struct ecore_vf_info *vf)
789 /* Set VF masks and configuration - pretend */
790 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
792 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
795 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
797 /* iterate over all queues, clear sb consumer */
798 for (i = 0; i < vf->num_sbs; i++)
799 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
801 vf->opaque_fid, true);
804 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
805 struct ecore_ptt *p_ptt,
806 struct ecore_vf_info *vf, bool enable)
810 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
812 igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
815 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
817 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
819 ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
822 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
825 static enum _ecore_status_t
826 ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
827 struct ecore_ptt *p_ptt,
834 /* If client overrides this, don't do anything */
835 if (p_hwfn->p_dev->b_dont_override_vf_msix)
836 return ECORE_SUCCESS;
838 /* For AH onward, configuration is per-PF. Find maximum of all
839 * the currently enabled child VFs, and set the number to be that.
841 if (!ECORE_IS_BB(p_hwfn->p_dev)) {
842 ecore_for_each_vf(p_hwfn, i) {
843 struct ecore_vf_info *p_vf;
845 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
849 current_max = OSAL_MAX_T(u8, current_max,
854 if (num_sbs > current_max)
855 return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
858 return ECORE_SUCCESS;
861 static enum _ecore_status_t
862 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
863 struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
865 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
866 enum _ecore_status_t rc = ECORE_SUCCESS;
869 return ECORE_SUCCESS;
871 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
872 "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
873 ECORE_VF_ABS_ID(p_hwfn, vf));
875 ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
876 ECORE_VF_ABS_ID(p_hwfn, vf));
878 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
880 /* It's possible VF was previously considered malicious */
881 vf->b_malicious = false;
882 rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
883 vf->abs_vf_id, vf->num_sbs);
884 if (rc != ECORE_SUCCESS)
887 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
889 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
890 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
892 ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
893 p_hwfn->hw_info.hw_mode);
896 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
905 * @brief ecore_iov_config_perm_table - configure the permission
907 * In E4, queue zone permission table size is 320x9. There
908	 * are 320 VF queues for a single-engine device (256 for a dual-
909	 * engine device), and each entry has the following format:
916 static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
917 struct ecore_ptt *p_ptt,
918 struct ecore_vf_info *vf, u8 enable)
924 for (qid = 0; qid < vf->num_rxqs; qid++) {
925 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
928 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
929 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
930 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
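/* Each queue-zone entry is a 32-bit word at base + 4 * qzone_id;
 * enabling writes the VF's absolute id together with the valid bit
 * (1 << 8), while disabling simply zeroes the entry.
 */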
934 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
935 struct ecore_ptt *p_ptt,
936 struct ecore_vf_info *vf)
938 /* Reset vf in IGU - interrupts are still disabled */
939 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
941 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
943 /* Permission Table */
944 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
947 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
948 struct ecore_ptt *p_ptt,
949 struct ecore_vf_info *vf,
952 struct ecore_igu_block *p_block;
953 struct cau_sb_entry sb_entry;
957 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
959 (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
960 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
962 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
963 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
964 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
966 for (qid = 0; qid < num_rx_queues; qid++) {
967 p_block = ecore_get_igu_free_sb(p_hwfn, false);
968 vf->igu_sbs[qid] = p_block->igu_sb_id;
969 p_block->status &= ~ECORE_IGU_STATUS_FREE;
970 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
972 ecore_wr(p_hwfn, p_ptt,
973 IGU_REG_MAPPING_MEMORY +
974 sizeof(u32) * p_block->igu_sb_id, val);
976		/* Configure the IGU SBs that were marked valid in the CAU */
977 ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
980 ecore_dmae_host2grc(p_hwfn, p_ptt,
981 (u64)(osal_uintptr_t)&sb_entry,
982 CAU_REG_SB_VAR_MEMORY +
983 p_block->igu_sb_id * sizeof(u64), 2, 0);
986 vf->num_sbs = (u8)num_rx_queues;
993	 * @brief The function invalidates all the VF entries;
994	 * technically this isn't required, but it is added for
995	 * cleanliness and ease of debugging in case a VF attempts to
996 * produce an interrupt after it has been taken down.
1002 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
1003 struct ecore_ptt *p_ptt,
1004 struct ecore_vf_info *vf)
1006 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1010 /* Invalidate igu CAM lines and mark them as free */
1011 for (idx = 0; idx < vf->num_sbs; idx++) {
1012 igu_id = vf->igu_sbs[idx];
1013 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
1015 val = ecore_rd(p_hwfn, p_ptt, addr);
1016 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
1017 ecore_wr(p_hwfn, p_ptt, addr, val);
1019 p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
1020 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
1026 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
1028 struct ecore_mcp_link_params *params,
1029 struct ecore_mcp_link_state *link,
1030 struct ecore_mcp_link_capabilities *p_caps)
1032 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
1033 struct ecore_bulletin_content *p_bulletin;
1038 p_bulletin = p_vf->bulletin.p_virt;
1039 p_bulletin->req_autoneg = params->speed.autoneg;
1040 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
1041 p_bulletin->req_forced_speed = params->speed.forced_speed;
1042 p_bulletin->req_autoneg_pause = params->pause.autoneg;
1043 p_bulletin->req_forced_rx = params->pause.forced_rx;
1044 p_bulletin->req_forced_tx = params->pause.forced_tx;
1045 p_bulletin->req_loopback = params->loopback_mode;
1047 p_bulletin->link_up = link->link_up;
1048 p_bulletin->speed = link->speed;
1049 p_bulletin->full_duplex = link->full_duplex;
1050 p_bulletin->autoneg = link->an;
1051 p_bulletin->autoneg_complete = link->an_complete;
1052 p_bulletin->parallel_detection = link->parallel_detection;
1053 p_bulletin->pfc_enabled = link->pfc_enabled;
1054 p_bulletin->partner_adv_speed = link->partner_adv_speed;
1055 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
1056 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
1057 p_bulletin->partner_adv_pause = link->partner_adv_pause;
1058 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
1060 p_bulletin->capability_speed = p_caps->speed_capabilities;
1063 enum _ecore_status_t
1064 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
1065 struct ecore_ptt *p_ptt,
1066 struct ecore_iov_vf_init_params *p_params)
1068 struct ecore_mcp_link_capabilities link_caps;
1069 struct ecore_mcp_link_params link_params;
1070 struct ecore_mcp_link_state link_state;
1071 u8 num_of_vf_available_chains = 0;
1072 struct ecore_vf_info *vf = OSAL_NULL;
1074 enum _ecore_status_t rc = ECORE_SUCCESS;
1078 vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1080 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
1081 return ECORE_UNKNOWN_ERROR;
1085 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
1086 p_params->rel_vf_id);
1090 /* Perform sanity checking on the requested vport/rss */
1091 if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
1092 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
1093 p_params->rel_vf_id, p_params->vport_id);
1097 if ((p_params->num_queues > 1) &&
1098 (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
1099 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
1100 p_params->rel_vf_id, p_params->rss_eng_id);
1104 /* TODO - remove this once we get confidence of change */
1105 if (!p_params->vport_id) {
1106 DP_NOTICE(p_hwfn, false,
1107 "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
1108 p_params->rel_vf_id);
1110 if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
1111 DP_NOTICE(p_hwfn, false,
1112 "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
1113 p_params->rel_vf_id);
1115 vf->vport_id = p_params->vport_id;
1116 vf->rss_eng_id = p_params->rss_eng_id;
1118 /* Since it's possible to relocate SBs, it's a bit difficult to check
1119 * things here. Simply check whether the index falls in the range
1120 * belonging to the PF.
1122 for (i = 0; i < p_params->num_queues; i++) {
1123 qid = p_params->req_rx_queue[i];
1124 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1125 DP_NOTICE(p_hwfn, true,
1126 "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
1127 qid, p_params->rel_vf_id,
1128 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1132 qid = p_params->req_tx_queue[i];
1133 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1134 DP_NOTICE(p_hwfn, true,
1135 "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
1136 qid, p_params->rel_vf_id,
1137 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1142 /* Limit number of queues according to number of CIDs */
1143 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1144 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1145 "VF[%d] - requesting to initialize for 0x%04x queues"
1146 " [0x%04x CIDs available]\n",
1147 vf->relative_vf_id, p_params->num_queues, (u16)cids);
1148 num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
1150 num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
1154 if (num_of_vf_available_chains == 0) {
1155 DP_ERR(p_hwfn, "no available igu sbs\n");
1159 /* Choose queue number and index ranges */
1160 vf->num_rxqs = num_of_vf_available_chains;
1161 vf->num_txqs = num_of_vf_available_chains;
1163 for (i = 0; i < vf->num_rxqs; i++) {
1164 struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
1166 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1167 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1169 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1170 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1171 vf->relative_vf_id, i, vf->igu_sbs[i],
1172 p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1175 /* Update the link configuration in bulletin.
1177 OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
1178 sizeof(link_params));
1179 OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
1180 sizeof(link_state));
1181 OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
1183 ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
1184 &link_params, &link_state, &link_caps);
1186 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1188 if (rc == ECORE_SUCCESS) {
1190 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1191 (1ULL << (vf->relative_vf_id % 64));
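/* active_vfs is a u64 bitmap - VF N occupies word N / 64, bit N % 64;
 * e.g. relative id 70 sets bit 6 of the second word.
 */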
1193 if (IS_LEAD_HWFN(p_hwfn))
1194 p_hwfn->p_dev->p_iov_info->num_vfs++;
1200 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1201 struct ecore_ptt *p_ptt,
1204 struct ecore_mcp_link_capabilities caps;
1205 struct ecore_mcp_link_params params;
1206 struct ecore_mcp_link_state link;
1207 struct ecore_vf_info *vf = OSAL_NULL;
1209 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1211 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
1212 return ECORE_UNKNOWN_ERROR;
1215 if (vf->bulletin.p_virt)
1216 OSAL_MEMSET(vf->bulletin.p_virt, 0,
1217 sizeof(*vf->bulletin.p_virt));
1219 OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1221 /* Get the link configuration back in bulletin so
1222 * that when VFs are re-enabled they get the actual
1223 * link configuration.
1225	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1226 OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1227 OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1229	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1231 /* Forget the VF's acquisition message */
1232 OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1234	/* Disabling interrupts and resetting the permission table were done
1235	 * during vf-close; however, we could get here without going through vf_close
1237 /* Disable Interrupts for VF */
1238 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1240 /* Reset Permission table */
1241 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1245 ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1249 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
1250	    ~(1ULL << (vf->relative_vf_id % 64));
1252 if (IS_LEAD_HWFN(p_hwfn))
1253 p_hwfn->p_dev->p_iov_info->num_vfs--;
1256 return ECORE_SUCCESS;
1259 static bool ecore_iov_tlv_supported(u16 tlvtype)
1261 return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
1264 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1265 struct ecore_vf_info *vf, u16 tlv)
1267 /* lock the channel */
1268 /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
1270 /* record the locking op */
1271 /* vf->op_current = tlv; @@@TBD MichalK */
1274 if (ecore_iov_tlv_supported(tlv))
1277 "VF[%d]: vf pf channel locked by %s\n",
1279 ecore_channel_tlvs_string[tlv]);
1283 "VF[%d]: vf pf channel locked by %04x\n",
1284 vf->abs_vf_id, tlv);
1287 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1288 struct ecore_vf_info *vf,
1291 /* log the unlock */
1292 if (ecore_iov_tlv_supported(expected_tlv))
1295 "VF[%d]: vf pf channel unlocked by %s\n",
1297 ecore_channel_tlvs_string[expected_tlv]);
1301 "VF[%d]: vf pf channel unlocked by %04x\n",
1302 vf->abs_vf_id, expected_tlv);
1304 /* record the locking op */
1305 /* vf->op_current = CHANNEL_TLV_NONE; */
1308 /* place a given tlv on the tlv buffer, continuing current tlv list */
1309 void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
1311 struct channel_tlv *tl = (struct channel_tlv *)*offset;
1314 tl->length = length;
1316 /* Offset should keep pointing to next TLV (the end of the last) */
1319 /* Return a pointer to the start of the added tlv */
1320 return *offset - length;
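/* Typical usage: callers pass &mbx->offset, which is advanced past the
 * freshly written header, and then fill the returned body - see
 * ecore_iov_prep_vp_update_resp_tlvs() below for an example.
 */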
1323 /* list the types and lengths of the tlvs on the buffer */
1324 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1326 u16 i = 1, total_length = 0;
1327 struct channel_tlv *tlv;
1330 /* cast current tlv list entry to channel tlv header */
1331 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1334 if (ecore_iov_tlv_supported(tlv->type))
1335 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1336 "TLV number %d: type %s, length %d\n",
1337 i, ecore_channel_tlvs_string[tlv->type],
1340 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1341 "TLV number %d: type %d, length %d\n",
1342 i, tlv->type, tlv->length);
1344 if (tlv->type == CHANNEL_TLV_LIST_END)
1347 /* Validate entry - protect against malicious VFs */
1349 DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1352 total_length += tlv->length;
1353 if (total_length >= sizeof(struct tlv_buffer_size)) {
1354 DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1362 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
1363 struct ecore_ptt *p_ptt,
1364 struct ecore_vf_info *p_vf,
1365 #ifdef CONFIG_ECORE_SW_CHANNEL
1368 u16 OSAL_UNUSED length,
1372 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1373 struct ecore_dmae_params params;
1376 mbx->reply_virt->default_resp.hdr.status = status;
1378 ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1380 #ifdef CONFIG_ECORE_SW_CHANNEL
1381 mbx->sw_mbx.response_size =
1382 length + sizeof(struct channel_list_end_tlv);
1384 if (!p_vf->b_hw_channel)
1388 eng_vf_id = p_vf->abs_vf_id;
1390	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
1391 params.flags = ECORE_DMAE_FLAG_VF_DST;
1392 params.dst_vfid = eng_vf_id;
1394 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1395 mbx->req_virt->first_tlv.reply_address +
1397 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1400 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1401 mbx->req_virt->first_tlv.reply_address,
1402			     sizeof(u64) / 4, &params);
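/* The reply body is DMAed first and the leading u64 (the header holding
 * the status) last, so the VF only observes a complete response once the
 * header lands; the channel READY flag is then set below.
 */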
1405 GTT_BAR0_MAP_REG_USDM_RAM +
1406 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1408 OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
1411 static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
1414 case ECORE_IOV_VP_UPDATE_ACTIVATE:
1415 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1416 case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1417 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1418 case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1419 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1420 case ECORE_IOV_VP_UPDATE_MCAST:
1421 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1422 case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1423 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1424 case ECORE_IOV_VP_UPDATE_RSS:
1425 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1426 case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1427 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1428 case ECORE_IOV_VP_UPDATE_SGE_TPA:
1429 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1435 static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
1436 struct ecore_vf_info *p_vf,
1437 struct ecore_iov_vf_mbx *p_mbx,
1438 u8 status, u16 tlvs_mask,
1441 struct pfvf_def_resp_tlv *resp;
1442 u16 size, total_len, i;
1444 OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1445 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1446 size = sizeof(struct pfvf_def_resp_tlv);
1449 ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1451 /* Prepare response for all extended tlvs if they are found by PF */
1452 for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1453 if (!(tlvs_mask & (1 << i)))
1456 resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
1459 if (tlvs_accepted & (1 << i))
1460 resp->hdr.status = status;
1462 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1464 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1465 "VF[%d] - vport_update resp: TLV %d, status %02x\n",
1466 p_vf->relative_vf_id,
1467 ecore_iov_vport_to_tlv(i),
1473 ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
1474 sizeof(struct channel_list_end_tlv));
1479 static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
1480 struct ecore_ptt *p_ptt,
1481 struct ecore_vf_info *vf_info,
1482 u16 type, u16 length, u8 status)
1484 struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1486 mbx->offset = (u8 *)mbx->reply_virt;
1488 ecore_add_tlv(&mbx->offset, type, length);
1489 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
1490 sizeof(struct channel_list_end_tlv));
1492 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1495 struct ecore_public_vf_info
1496 *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
1498 bool b_enabled_only)
1500 struct ecore_vf_info *vf = OSAL_NULL;
1502 vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1506 return &vf->p_vf_info;
1509 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
1510 struct ecore_vf_info *p_vf)
1513 p_vf->vf_bulletin = 0;
1514 p_vf->vport_instance = 0;
1515 p_vf->configured_features = 0;
1517 /* If VF previously requested less resources, go back to default */
1518 p_vf->num_rxqs = p_vf->num_sbs;
1519 p_vf->num_txqs = p_vf->num_sbs;
1521 p_vf->num_active_rxqs = 0;
1523 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1524 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1526 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1527 if (!p_queue->cids[j].p_cid)
1530 ecore_eth_queue_cid_release(p_hwfn,
1531 p_queue->cids[j].p_cid);
1532 p_queue->cids[j].p_cid = OSAL_NULL;
1536 OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1537 OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1538 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1541 /* Returns either 0, or log(size) */
1542 static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
1543 struct ecore_ptt *p_ptt)
1545 u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
1553 ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
1554 struct ecore_ptt *p_ptt,
1555 struct ecore_vf_info *p_vf,
1556 struct vf_pf_resc_request *p_req,
1557 struct pf_vf_resc *p_resp)
1559 u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1560 u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
1561 DB_ADDR_VF(0, DQ_DEMS_LEGACY);
1564 p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
1566	/* If VF didn't bother asking for QIDs then don't bother limiting
1567 * number of CIDs. The VF doesn't care about the number, and this
1568 * has the likely result of causing an additional acquisition.
1570 if (!(p_vf->acquire.vfdev_info.capabilities &
1571 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1574 /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
1575 * that would make sure doorbells for all CIDs fall within the bar.
1576 * If it doesn't, make sure regview window is sufficient.
1578 if (p_vf->acquire.vfdev_info.capabilities &
1579 VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1580 bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
1582 bar_size = 1 << bar_size;
1584 if (ECORE_IS_CMT(p_hwfn->p_dev))
1587 bar_size = PXP_VF_BAR0_DQ_LENGTH;
1590 if (bar_size / db_size < 256)
1591 p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
1592 (u8)(bar_size / db_size));
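/* Example (illustrative numbers): if the doorbell bar only covers 128
 * per-CID doorbell strides, num_cids is clamped to 128 so every CID the
 * VF is granted has a doorbell that falls inside the bar.
 */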
1595 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1596 struct ecore_ptt *p_ptt,
1597 struct ecore_vf_info *p_vf,
1598 struct vf_pf_resc_request *p_req,
1599 struct pf_vf_resc *p_resp)
1603 /* Queue related information */
1604 p_resp->num_rxqs = p_vf->num_rxqs;
1605 p_resp->num_txqs = p_vf->num_txqs;
1606 p_resp->num_sbs = p_vf->num_sbs;
1608 for (i = 0; i < p_resp->num_sbs; i++) {
1609 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1610 /* TODO - what's this sb_qid field? Is it deprecated?
1611 * or is there an ecore_client that looks at this?
1613 p_resp->hw_sbs[i].sb_qid = 0;
1616 /* These fields are filled for backward compatibility.
1617 * Unused by modern vfs.
1619 for (i = 0; i < p_resp->num_rxqs; i++) {
1620 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1621 (u16 *)&p_resp->hw_qid[i]);
1625 /* Filter related information */
1626 p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1627 p_req->num_mac_filters);
1628 p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1629 p_req->num_vlan_filters);
1631 ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1633 /* This isn't really needed/enforced, but some legacy VFs might depend
1634 * on the correct filling of this field.
1636 p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1638 /* Validate sufficient resources for VF */
1639 if (p_resp->num_rxqs < p_req->num_rxqs ||
1640 p_resp->num_txqs < p_req->num_txqs ||
1641 p_resp->num_sbs < p_req->num_sbs ||
1642 p_resp->num_mac_filters < p_req->num_mac_filters ||
1643 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1644 p_resp->num_mc_filters < p_req->num_mc_filters ||
1645 p_resp->num_cids < p_req->num_cids) {
1646 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1647 "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1649 p_req->num_rxqs, p_resp->num_rxqs,
1650			   p_req->num_txqs, p_resp->num_txqs,
1651 p_req->num_sbs, p_resp->num_sbs,
1652 p_req->num_mac_filters, p_resp->num_mac_filters,
1653 p_req->num_vlan_filters, p_resp->num_vlan_filters,
1654 p_req->num_mc_filters, p_resp->num_mc_filters,
1655 p_req->num_cids, p_resp->num_cids);
1657 /* Some legacy OSes are incapable of correctly handling this
1660 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1661 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1662 (p_vf->acquire.vfdev_info.os_type ==
1663 VFPF_ACQUIRE_OS_WINDOWS))
1664 return PFVF_STATUS_SUCCESS;
1666 return PFVF_STATUS_NO_RESOURCE;
1669 return PFVF_STATUS_SUCCESS;
1672 static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
1674 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1675 OFFSETOF(struct mstorm_vf_zone,
1676 non_trigger.eth_queue_stat);
1677 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1678 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1679 OFFSETOF(struct ustorm_vf_zone,
1680 non_trigger.eth_queue_stat);
1681 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1682 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1683 OFFSETOF(struct pstorm_vf_zone,
1684 non_trigger.eth_queue_stat);
1685 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1686 p_stats->tstats.address = 0;
1687 p_stats->tstats.len = 0;
1690 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
1691 struct ecore_ptt *p_ptt,
1692 struct ecore_vf_info *vf)
1694 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1695 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1696 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1697 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1698 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1699 struct pf_vf_resc *resc = &resp->resc;
1700 enum _ecore_status_t rc;
1702 OSAL_MEMSET(resp, 0, sizeof(*resp));
1704 /* Write the PF version so that VF would know which version
1705 * is supported - might be later overridden. This guarantees that
1706 * VF could recognize legacy PF based on lack of versions in reply.
1708 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1709 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1711 /* TODO - not doing anything is bad since we'll assert, but this isn't
1712 * necessarily the right behavior - perhaps we should have allowed some
1715 if (vf->state != VF_FREE &&
1716 vf->state != VF_STOPPED) {
1717 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1718 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1719 vf->abs_vf_id, vf->state);
1723 /* Validate FW compatibility */
1724 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1725 if (req->vfdev_info.capabilities &
1726 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1727 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1729 /* This legacy support would need to be removed once
1730 * the major has changed.
1732 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1734 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1735 "VF[%d] is pre-fastpath HSI\n",
1737 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1738 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1741 "VF[%d] needs fastpath HSI %02x.%02x, which is"
1742 " incompatible with loaded FW's faspath"
1745 req->vfdev_info.eth_fp_hsi_major,
1746 req->vfdev_info.eth_fp_hsi_minor,
1747 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1753 /* On 100g PFs, prevent old VFs from loading */
1754 if (ECORE_IS_CMT(p_hwfn->p_dev) &&
1755 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1757 "VF[%d] is running an old driver that doesn't support"
1763 #ifndef __EXTRACT__LINUX__
1764 if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1765 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1770 /* Store the acquire message */
1771 OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1773 vf->opaque_fid = req->vfdev_info.opaque_fid;
1775 vf->vf_bulletin = req->bulletin_addr;
1776 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1777 vf->bulletin.size : req->bulletin_size;
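/* i.e. the agreed bulletin size is the minimum of what the PF allocated
 * and what the VF reported it can accept.
 */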
1779 /* fill in pfdev info */
1780 pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1781 pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
1782 pfdev_info->indices_per_sb = PIS_PER_SB_E4;
1784 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1785 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1786 if (ECORE_IS_CMT(p_hwfn->p_dev))
1787 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1789 /* Share our ability to use multiple queue-ids only with VFs
1792 if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1793 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1795 /* Share the sizes of the bars with VF */
1796 resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
1799 ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
1801 OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1804 pfdev_info->fw_major = FW_MAJOR_VERSION;
1805 pfdev_info->fw_minor = FW_MINOR_VERSION;
1806 pfdev_info->fw_rev = FW_REVISION_VERSION;
1807 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1809 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1812 pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1813 req->vfdev_info.eth_fp_hsi_minor);
1814 pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1815 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1818 pfdev_info->dev_type = p_hwfn->p_dev->type;
1819 pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1821 /* Fill resources available to VF; Make sure there are enough to
1822 * satisfy the VF's request.
1824 vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1825 &req->resc_request, resc);
1826 if (vfpf_status != PFVF_STATUS_SUCCESS)
1829 /* Start the VF in FW */
1830 rc = ecore_sp_vf_start(p_hwfn, vf);
1831 if (rc != ECORE_SUCCESS) {
1832 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1834 vfpf_status = PFVF_STATUS_FAILURE;
1838 /* Fill agreed size of bulletin board in response, and post
1839 * an initial image to the bulletin board.
1841 resp->bulletin_size = vf->bulletin.size;
1842 ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1844 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1845 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1846 " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1847 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1849 vf->abs_vf_id, resp->pfdev_info.chip_num,
1850 resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1851 (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1852 resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1853 resc->num_vlan_filters);
1855 vf->state = VF_ACQUIRED;
1858 /* Prepare Response */
1859 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1860 sizeof(struct pfvf_acquire_resp_tlv),
1864 static enum _ecore_status_t
1865 __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1866 struct ecore_vf_info *p_vf, bool val)
1868 struct ecore_sp_vport_update_params params;
1869 enum _ecore_status_t rc;
1871 if (val == p_vf->spoof_chk) {
1872 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1873 "Spoofchk value[%d] is already configured\n", val);
1874 return ECORE_SUCCESS;
1877	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1878 params.opaque_fid = p_vf->opaque_fid;
1879 params.vport_id = p_vf->vport_id;
1880 params.update_anti_spoofing_en_flg = 1;
1881 params.anti_spoofing_en = val;
1883	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1885 if (rc == ECORE_SUCCESS) {
1886 p_vf->spoof_chk = val;
1887 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1888 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1889 "Spoofchk val[%d] configured\n", val);
1891 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1892 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1893 val, p_vf->relative_vf_id);
1899 static enum _ecore_status_t
1900 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1901 struct ecore_vf_info *p_vf)
1903 struct ecore_filter_ucast filter;
1904 enum _ecore_status_t rc = ECORE_SUCCESS;
1907 OSAL_MEMSET(&filter, 0, sizeof(filter));
1908 filter.is_rx_filter = 1;
1909 filter.is_tx_filter = 1;
1910 filter.vport_to_add_to = p_vf->vport_id;
1911 filter.opcode = ECORE_FILTER_ADD;
1913 /* Reconfigure vlans */
1914 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1915 if (!p_vf->shadow_config.vlans[i].used)
1918 filter.type = ECORE_FILTER_VLAN;
1919 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1920 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1921 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1922 filter.vlan, p_vf->relative_vf_id);
1923 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1924 &filter, ECORE_SPQ_MODE_CB,
1927 DP_NOTICE(p_hwfn, true,
1928 "Failed to configure VLAN [%04x]"
1930 filter.vlan, p_vf->relative_vf_id);
1938 static enum _ecore_status_t
1939 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1940 struct ecore_vf_info *p_vf, u64 events)
1942 enum _ecore_status_t rc = ECORE_SUCCESS;
1944	/* TODO - what about MACs? */
1946 if ((events & (1 << VLAN_ADDR_FORCED)) &&
1947 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1948 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1953 static enum _ecore_status_t
1954 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
1955 struct ecore_vf_info *p_vf,
1958 enum _ecore_status_t rc = ECORE_SUCCESS;
1959 struct ecore_filter_ucast filter;
1961 if (!p_vf->vport_instance)
1964 if (events & (1 << MAC_ADDR_FORCED)) {
1965 /* Since there's no way [currently] of removing the MAC,
1966 * we can always assume this means we need to force it.
1968 OSAL_MEMSET(&filter, 0, sizeof(filter));
1969 filter.type = ECORE_FILTER_MAC;
1970 filter.opcode = ECORE_FILTER_REPLACE;
1971 filter.is_rx_filter = 1;
1972 filter.is_tx_filter = 1;
1973 filter.vport_to_add_to = p_vf->vport_id;
1974 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
1976 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1978 ECORE_SPQ_MODE_CB, OSAL_NULL);
1980 DP_NOTICE(p_hwfn, true,
1981 "PF failed to configure MAC for VF\n");
1985 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1988 if (events & (1 << VLAN_ADDR_FORCED)) {
1989 struct ecore_sp_vport_update_params vport_update;
1993 OSAL_MEMSET(&filter, 0, sizeof(filter));
1994 filter.type = ECORE_FILTER_VLAN;
1995 filter.is_rx_filter = 1;
1996 filter.is_tx_filter = 1;
1997 filter.vport_to_add_to = p_vf->vport_id;
1998 filter.vlan = p_vf->bulletin.p_virt->pvid;
1999 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
2002 /* Send the ramrod */
2003 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2005 ECORE_SPQ_MODE_CB, OSAL_NULL);
2007 DP_NOTICE(p_hwfn, true,
2008 "PF failed to configure VLAN for VF\n");
2012 /* Update the default-vlan & silent vlan stripping */
2013 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
2014 vport_update.opaque_fid = p_vf->opaque_fid;
2015 vport_update.vport_id = p_vf->vport_id;
2016 vport_update.update_default_vlan_enable_flg = 1;
2017 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
2018 vport_update.update_default_vlan_flg = 1;
2019 vport_update.default_vlan = filter.vlan;
2021 vport_update.update_inner_vlan_removal_flg = 1;
2022 removal = filter.vlan ?
2023 1 : p_vf->shadow_config.inner_vlan_removal;
2024 vport_update.inner_vlan_removal_flg = removal;
2025 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
2026 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
2027 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
2029 DP_NOTICE(p_hwfn, true,
2030 "PF failed to configure VF vport for vlan\n");
2034 /* Update all the Rx queues */
2035 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
2036 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
2037 struct ecore_queue_cid *p_cid = OSAL_NULL;
2039		/* There can be at most 1 Rx queue per qzone. Find it */
2040 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2041 if (p_cid == OSAL_NULL)
2044 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2047 ECORE_SPQ_MODE_EBLOCK,
2050 DP_NOTICE(p_hwfn, true,
2051 "Failed to send Rx update"
2052 " fo queue[0x%04x]\n",
2053 p_cid->rel.queue_id);
2059 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
2061 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
2064 /* If forced features are terminated, we need to configure the shadow
2065 * configuration back again.
2068 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2073 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2074 struct ecore_ptt *p_ptt,
2075 struct ecore_vf_info *vf)
2077 struct ecore_sp_vport_start_params params = { 0 };
2078 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2079 struct vfpf_vport_start_tlv *start;
2080 u8 status = PFVF_STATUS_SUCCESS;
2081 struct ecore_vf_info *vf_info;
2084 enum _ecore_status_t rc;
2086 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2088 DP_NOTICE(p_hwfn->p_dev, true,
2089 "Failed to get VF info, invalid vfid [%d]\n",
2090 vf->relative_vf_id);
2094 vf->state = VF_ENABLED;
2095 start = &mbx->req_virt->start_vport;
2097 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2099 /* Initialize Status block in CAU */
2100 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2101 if (!start->sb_addr[sb_id]) {
2102 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2103 "VF[%d] did not fill the address of SB %d\n",
2104 vf->relative_vf_id, sb_id);
2108 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2109 start->sb_addr[sb_id],
2114 vf->mtu = start->mtu;
2115 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2117 /* Take into consideration configuration forced by hypervisor;
2118 * If none is configured, use the supplied VF values [for old
2119 * vfs that would still be fine, since they passed '0' as padding].
2121 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2122 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2123 u8 vf_req = start->only_untagged;
2125 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2126 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2129 params.tpa_mode = start->tpa_mode;
2130 params.remove_inner_vlan = start->inner_vlan_removal;
2131 params.tx_switching = true;
2134 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2135 DP_NOTICE(p_hwfn, false,
2136 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2137 params.tx_switching = false;
2141 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2142 params.drop_ttl0 = false;
2143 params.concrete_fid = vf->concrete_fid;
2144 params.opaque_fid = vf->opaque_fid;
2145 params.vport_id = vf->vport_id;
2146 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2147 params.mtu = vf->mtu;
2148 params.check_mac = true;
2150 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2151 if (rc != ECORE_SUCCESS) {
2153 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2154 status = PFVF_STATUS_FAILURE;
2156 vf->vport_instance++;
2158 /* Force configuration if needed on the newly opened vport */
2159 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2160 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2161 vf->vport_id, vf->opaque_fid);
2162 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2165 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2166 sizeof(struct pfvf_def_resp_tlv), status);
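/* Handle the VPORT_TEARDOWN mailbox request: refuse (and mark the VF as
 * malicious) if Rx/Tx queues are still active, otherwise send the
 * VPORT_STOP ramrod and clear the VF's per-vport configuration.
 */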
2169 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2170 struct ecore_ptt *p_ptt,
2171 struct ecore_vf_info *vf)
2173 u8 status = PFVF_STATUS_SUCCESS;
2174 enum _ecore_status_t rc;
2176 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2177 vf->vport_instance--;
2178 vf->spoof_chk = false;
2180 if ((ecore_iov_validate_active_rxq(vf)) ||
2181 (ecore_iov_validate_active_txq(vf))) {
2182 vf->b_malicious = true;
2183 DP_NOTICE(p_hwfn, false,
2184 "VF [%02x] - considered malicious;"
2185 " Unable to stop RX/TX queues\n",
2187 status = PFVF_STATUS_MALICIOUS;
2191 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2192 if (rc != ECORE_SUCCESS) {
2194 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2195 status = PFVF_STATUS_FAILURE;
2198 /* Forget the configuration on the vport */
2199 vf->configured_features = 0;
2200 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2203 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2204 sizeof(struct pfvf_def_resp_tlv), status);
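/* Build the START_RXQ response. The reply size depends on whether the
 * requesting VF is a legacy client; for non-legacy VFs the offset of the
 * Rx producer inside the VF BAR is returned on success.
 */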
2207 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2208 struct ecore_ptt *p_ptt,
2209 struct ecore_vf_info *vf,
2210 u8 status, bool b_legacy)
2212 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2213 struct pfvf_start_queue_resp_tlv *p_tlv;
2214 struct vfpf_start_rxq_tlv *req;
2217 mbx->offset = (u8 *)mbx->reply_virt;
2219 /* Taking a bigger struct instead of adding a TLV to list was a
2220 * mistake, but one which we're now stuck with, as some older
2221 * clients assume the size of the previous response.
2224 length = sizeof(*p_tlv);
2226 length = sizeof(struct pfvf_def_resp_tlv);
2228 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2229 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2230 sizeof(struct channel_list_end_tlv));
2232 /* Update the TLV with the response */
2233 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2234 req = &mbx->req_virt->start_rxq;
2235 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2236 OFFSETOF(struct mstorm_vf_zone,
2237 non_trigger.eth_rx_queue_producers) +
2238 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2241 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
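/* Extract the queue-qid index from the VF's message. VFs that don't
 * advertise VFPF_ACQUIRE_CAP_QUEUE_QIDS get the fixed legacy Rx/Tx index;
 * otherwise the CHANNEL_TLV_QID TLV is required and its value is validated
 * against MAX_QUEUES_PER_QZONE.
 */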
2244 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2245 struct ecore_vf_info *p_vf, bool b_is_tx)
2247 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2248 struct vfpf_qid_tlv *p_qid_tlv;
2250 /* Search for the qid TLV only if the VF indicated it's going to provide it */
2251 if (!(p_vf->acquire.vfdev_info.capabilities &
2252 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2254 return ECORE_IOV_LEGACY_QID_TX;
2256 return ECORE_IOV_LEGACY_QID_RX;
2259 p_qid_tlv = (struct vfpf_qid_tlv *)
2260 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2262 if (p_qid_tlv == OSAL_NULL) {
2263 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2264 "VF[%2x]: Failed to provide qid\n",
2265 p_vf->relative_vf_id);
2267 return ECORE_IOV_QID_INVALID;
2270 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2271 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2272 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2273 p_vf->relative_vf_id, p_qid_tlv->qid);
2274 return ECORE_IOV_QID_INVALID;
2277 return p_qid_tlv->qid;
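/* Handle the START_RXQ mailbox request: validate the queue and SB indices,
 * acquire a queue-cid for the VF's Rx queue, zero the Rx producer for
 * non-legacy VFs and send the Rx queue start ramrod before responding.
 */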
2280 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2281 struct ecore_ptt *p_ptt,
2282 struct ecore_vf_info *vf)
2284 struct ecore_queue_start_common_params params;
2285 struct ecore_queue_cid_vf_params vf_params;
2286 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2287 u8 status = PFVF_STATUS_NO_RESOURCE;
2288 u8 qid_usage_idx, vf_legacy = 0;
2289 struct ecore_vf_queue *p_queue;
2290 struct vfpf_start_rxq_tlv *req;
2291 struct ecore_queue_cid *p_cid;
2292 struct ecore_sb_info sb_dummy;
2293 enum _ecore_status_t rc;
2295 req = &mbx->req_virt->start_rxq;
2297 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2298 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2299 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2302 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2303 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2306 p_queue = &vf->vf_queues[req->rx_qid];
2307 if (p_queue->cids[qid_usage_idx].p_cid)
2310 vf_legacy = ecore_vf_calculate_legacy(vf);
2312 /* Acquire a new queue-cid */
2313 OSAL_MEMSET(&params, 0, sizeof(params));
2314 params.queue_id = (u8)p_queue->fw_rx_qid;
2315 params.vport_id = vf->vport_id;
2316 params.stats_id = vf->abs_vf_id + 0x10;
2318 /* Since IGU index is passed via sb_info, construct a dummy one */
2319 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2320 sb_dummy.igu_sb_id = req->hw_sb;
2321 params.p_sb = &sb_dummy;
2322 params.sb_idx = req->sb_index;
2324 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2325 vf_params.vfid = vf->relative_vf_id;
2326 vf_params.vf_qid = (u8)req->rx_qid;
2327 vf_params.vf_legacy = vf_legacy;
2328 vf_params.qid_usage_idx = qid_usage_idx;
2330 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2331 &params, true, &vf_params);
2332 if (p_cid == OSAL_NULL)
2335 /* Legacy VFs have their Producers in a different location, which they
2336 * calculate on their own and clean the producer prior to this.
2338 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2340 GTT_BAR0_MAP_REG_MSDM_RAM +
2341 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2344 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2349 if (rc != ECORE_SUCCESS) {
2350 status = PFVF_STATUS_FAILURE;
2351 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2353 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2354 p_queue->cids[qid_usage_idx].b_is_tx = false;
2355 status = PFVF_STATUS_SUCCESS;
2356 vf->num_active_rxqs++;
2360 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2362 ECORE_QCID_LEGACY_VF_RX_PROD));
2366 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2367 struct ecore_tunnel_info *p_tun,
2368 u16 tunn_feature_mask)
2370 p_resp->tunn_feature_mask = tunn_feature_mask;
2371 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2372 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2373 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2374 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2375 p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
2376 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2377 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2378 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2379 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2380 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2381 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2382 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2386 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2387 struct ecore_tunn_update_type *p_tun,
2388 enum ecore_tunn_mode mask, u8 tun_cls)
2390 if (p_req->tun_mode_update_mask & (1 << mask)) {
2391 p_tun->b_update_mode = true;
2393 if (p_req->tunn_mode & (1 << mask))
2394 p_tun->b_mode_enabled = true;
2397 p_tun->tun_cls = tun_cls;
2401 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2402 struct ecore_tunn_update_type *p_tun,
2403 struct ecore_tunn_update_udp_port *p_port,
2404 enum ecore_tunn_mode mask,
2405 u8 tun_cls, u8 update_port, u16 port)
2408 p_port->b_update_port = true;
2409 p_port->port = port;
2412 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2416 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2418 bool b_update_requested = false;
2420 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2421 p_req->update_geneve_port || p_req->update_vxlan_port)
2422 b_update_requested = true;
2424 return b_update_requested;
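/* Handle the UPDATE_TUNN_PARAM mailbox request: translate the VF's tunnel
 * TLV into an ecore_tunnel_info, let OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG
 * validate or modify it, apply it via the PF-update ramrod when required
 * and publish the resulting UDP ports to all VF bulletins before replying
 * with the current tunnel state.
 */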
2427 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2428 struct ecore_ptt *p_ptt,
2429 struct ecore_vf_info *p_vf)
2431 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2432 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2433 struct pfvf_update_tunn_param_tlv *p_resp;
2434 struct vfpf_update_tunn_param_tlv *p_req;
2435 enum _ecore_status_t rc = ECORE_SUCCESS;
2436 u8 status = PFVF_STATUS_SUCCESS;
2437 bool b_update_required = false;
2438 struct ecore_tunnel_info tunn;
2439 u16 tunn_feature_mask = 0;
2442 mbx->offset = (u8 *)mbx->reply_virt;
2444 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2445 p_req = &mbx->req_virt->tunn_param_update;
2447 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2448 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2449 "No tunnel update requested by VF\n");
2450 status = PFVF_STATUS_FAILURE;
2454 tunn.b_update_rx_cls = p_req->update_tun_cls;
2455 tunn.b_update_tx_cls = p_req->update_tun_cls;
2457 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2458 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2459 p_req->update_vxlan_port,
2461 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2462 ECORE_MODE_L2GENEVE_TUNN,
2463 p_req->l2geneve_clss,
2464 p_req->update_geneve_port,
2465 p_req->geneve_port);
2466 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2467 ECORE_MODE_IPGENEVE_TUNN,
2468 p_req->ipgeneve_clss);
2469 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2470 ECORE_MODE_L2GRE_TUNN,
2472 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2473 ECORE_MODE_IPGRE_TUNN,
2476 /* If the PF modifies the VF's request, it should still
2477 * return an error in case of a partial or modified
2478 * configuration, as opposed to the requested one.
2480 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2481 &b_update_required, &tunn);
2483 if (rc != ECORE_SUCCESS)
2484 status = PFVF_STATUS_FAILURE;
2486 /* Check whether the ECORE client is willing to update anything */
2487 if (b_update_required) {
2490 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2491 ECORE_SPQ_MODE_EBLOCK,
2493 if (rc != ECORE_SUCCESS)
2494 status = PFVF_STATUS_FAILURE;
2496 geneve_port = p_tun->geneve_port.port;
2497 ecore_for_each_vf(p_hwfn, i) {
2498 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2499 p_tun->vxlan_port.port,
2505 p_resp = ecore_add_tlv(&mbx->offset,
2506 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2508 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2509 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2510 sizeof(struct channel_list_end_tlv));
2512 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2515 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2516 struct ecore_ptt *p_ptt,
2517 struct ecore_vf_info *p_vf,
2521 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2522 struct pfvf_start_queue_resp_tlv *p_tlv;
2523 bool b_legacy = false;
2526 mbx->offset = (u8 *)mbx->reply_virt;
2528 /* Taking a bigger struct instead of adding a TLV to list was a
2529 * mistake, but one which we're now stuck with, as some older
2530 * clients assume the size of the previous response.
2532 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2533 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2537 length = sizeof(*p_tlv);
2539 length = sizeof(struct pfvf_def_resp_tlv);
2541 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2542 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2543 sizeof(struct channel_list_end_tlv));
2545 /* Update the TLV with the response */
2546 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2547 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2549 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
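/* Handle the START_TXQ mailbox request: mirror of the Rx path - validate
 * the queue and SB indices, acquire a queue-cid, pick the VF's PQ and send
 * the Tx queue start ramrod before responding with the doorbell offset.
 */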
2552 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2553 struct ecore_ptt *p_ptt,
2554 struct ecore_vf_info *vf)
2556 struct ecore_queue_start_common_params params;
2557 struct ecore_queue_cid_vf_params vf_params;
2558 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2559 u8 status = PFVF_STATUS_NO_RESOURCE;
2560 struct ecore_vf_queue *p_queue;
2561 struct vfpf_start_txq_tlv *req;
2562 struct ecore_queue_cid *p_cid;
2563 struct ecore_sb_info sb_dummy;
2564 u8 qid_usage_idx, vf_legacy;
2566 enum _ecore_status_t rc;
2569 OSAL_MEMSET(&params, 0, sizeof(params));
2570 req = &mbx->req_virt->start_txq;
2572 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2573 ECORE_IOV_VALIDATE_Q_NA) ||
2574 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2577 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2578 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2581 p_queue = &vf->vf_queues[req->tx_qid];
2582 if (p_queue->cids[qid_usage_idx].p_cid)
2585 vf_legacy = ecore_vf_calculate_legacy(vf);
2587 /* Acquire a new queue-cid */
2588 params.queue_id = p_queue->fw_tx_qid;
2589 params.vport_id = vf->vport_id;
2590 params.stats_id = vf->abs_vf_id + 0x10;
2592 /* Since IGU index is passed via sb_info, construct a dummy one */
2593 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2594 sb_dummy.igu_sb_id = req->hw_sb;
2595 params.p_sb = &sb_dummy;
2596 params.sb_idx = req->sb_index;
2598 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2599 vf_params.vfid = vf->relative_vf_id;
2600 vf_params.vf_qid = (u8)req->tx_qid;
2601 vf_params.vf_legacy = vf_legacy;
2602 vf_params.qid_usage_idx = qid_usage_idx;
2604 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2605 &params, false, &vf_params);
2606 if (p_cid == OSAL_NULL)
2609 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2610 vf->relative_vf_id);
2611 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2612 req->pbl_addr, req->pbl_size, pq);
2613 if (rc != ECORE_SUCCESS) {
2614 status = PFVF_STATUS_FAILURE;
2615 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2617 status = PFVF_STATUS_SUCCESS;
2618 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2619 p_queue->cids[qid_usage_idx].b_is_tx = true;
2624 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
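/* Close a single VF Rx queue: validate the queue index and that the cid at
 * the given qid-usage index really belongs to an Rx queue, then stop the
 * queue and clear the VF's cid entry.
 */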
2628 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2629 struct ecore_vf_info *vf,
2632 bool cqe_completion)
2634 struct ecore_vf_queue *p_queue;
2635 enum _ecore_status_t rc = ECORE_SUCCESS;
2637 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2638 ECORE_IOV_VALIDATE_Q_NA)) {
2639 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2640 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2641 vf->relative_vf_id, rxq_id, qid_usage_idx);
2645 p_queue = &vf->vf_queues[rxq_id];
2647 /* We've validated the index and the existence of the active RXQ -
2648 * now we need to make sure that it's using the correct qid.
2650 if (!p_queue->cids[qid_usage_idx].p_cid ||
2651 p_queue->cids[qid_usage_idx].b_is_tx) {
2652 struct ecore_queue_cid *p_cid;
2654 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2655 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2656 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2657 vf->relative_vf_id, rxq_id, qid_usage_idx,
2658 rxq_id, p_cid->qid_usage_idx);
2662 /* Now that we know we have a valid Rx-queue - close it */
2663 rc = ecore_eth_rx_queue_stop(p_hwfn,
2664 p_queue->cids[qid_usage_idx].p_cid,
2665 false, cqe_completion);
2666 if (rc != ECORE_SUCCESS)
2669 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2670 vf->num_active_rxqs--;
2672 return ECORE_SUCCESS;
2675 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2676 struct ecore_vf_info *vf,
2680 struct ecore_vf_queue *p_queue;
2681 enum _ecore_status_t rc = ECORE_SUCCESS;
2683 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2684 ECORE_IOV_VALIDATE_Q_NA))
2687 p_queue = &vf->vf_queues[txq_id];
2688 if (!p_queue->cids[qid_usage_idx].p_cid ||
2689 !p_queue->cids[qid_usage_idx].b_is_tx)
2692 rc = ecore_eth_tx_queue_stop(p_hwfn,
2693 p_queue->cids[qid_usage_idx].p_cid);
2694 if (rc != ECORE_SUCCESS)
2697 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2698 return ECORE_SUCCESS;
2701 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2702 struct ecore_ptt *p_ptt,
2703 struct ecore_vf_info *vf)
2705 u16 length = sizeof(struct pfvf_def_resp_tlv);
2706 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2707 u8 status = PFVF_STATUS_FAILURE;
2708 struct vfpf_stop_rxqs_tlv *req;
2710 enum _ecore_status_t rc;
2712 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
2713 * would be one. Since no older ecore passed multiple queues
2714 * using this API, sanity-check the value.
2716 req = &mbx->req_virt->stop_rxqs;
2717 if (req->num_rxqs != 1) {
2718 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2719 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2720 vf->relative_vf_id);
2721 status = PFVF_STATUS_NOT_SUPPORTED;
2725 /* Find which qid-index is associated with the queue */
2726 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2727 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2730 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2731 qid_usage_idx, req->cqe_completion);
2732 if (rc == ECORE_SUCCESS)
2733 status = PFVF_STATUS_SUCCESS;
2735 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2739 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2740 struct ecore_ptt *p_ptt,
2741 struct ecore_vf_info *vf)
2743 u16 length = sizeof(struct pfvf_def_resp_tlv);
2744 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2745 u8 status = PFVF_STATUS_FAILURE;
2746 struct vfpf_stop_txqs_tlv *req;
2748 enum _ecore_status_t rc;
2750 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
2751 * would be one. Since no older ecore passed multiple queues
2752 * using this API, sanity-check the value.
2754 req = &mbx->req_virt->stop_txqs;
2755 if (req->num_txqs != 1) {
2756 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2757 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2758 vf->relative_vf_id);
2759 status = PFVF_STATUS_NOT_SUPPORTED;
2763 /* Find which qid-index is associated with the queue */
2764 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2765 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2768 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2770 if (rc == ECORE_SUCCESS)
2771 status = PFVF_STATUS_SUCCESS;
2774 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
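/* Handle the UPDATE_RXQ mailbox request: collect the queue-cids referenced
 * by the VF (a single queue for QIDS-aware VFs) and send an Rx queues
 * update ramrod with the requested CQE/event completion flags.
 */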
2778 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2779 struct ecore_ptt *p_ptt,
2780 struct ecore_vf_info *vf)
2782 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2783 u16 length = sizeof(struct pfvf_def_resp_tlv);
2784 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2785 struct vfpf_update_rxq_tlv *req;
2786 u8 status = PFVF_STATUS_FAILURE;
2787 u8 complete_event_flg;
2788 u8 complete_cqe_flg;
2790 enum _ecore_status_t rc;
2793 req = &mbx->req_virt->update_rxq;
2794 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2795 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2797 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2798 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2801 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2802 * expecting a single queue at a time. Validate this.
2804 if ((vf->acquire.vfdev_info.capabilities &
2805 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2806 req->num_rxqs != 1) {
2807 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2808 "VF[%d] supports QIDs but sends multiple queues\n",
2809 vf->relative_vf_id);
2813 /* Validate inputs - for the legacy case this is still true since
2814 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2816 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2817 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2818 ECORE_IOV_VALIDATE_Q_NA) ||
2819 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2820 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2821 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2822 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2823 vf->relative_vf_id, req->rx_qid,
2829 for (i = 0; i < req->num_rxqs; i++) {
2830 u16 qid = req->rx_qid + i;
2832 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2835 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2839 ECORE_SPQ_MODE_EBLOCK,
2841 if (rc != ECORE_SUCCESS)
2844 status = PFVF_STATUS_SUCCESS;
2846 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
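/* Walk the TLV chain of a VF message and return the first TLV matching
 * req_type, or OSAL_NULL if the chain ends or overruns TLV_BUFFER_SIZE.
 */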
2850 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2851 void *p_tlvs_list, u16 req_type)
2853 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2857 if (!p_tlv->length) {
2858 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2862 if (p_tlv->type == req_type) {
2863 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2864 "Extended tlv type %s, length %d found\n",
2865 ecore_channel_tlvs_string[p_tlv->type],
2870 len += p_tlv->length;
2871 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2873 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2874 DP_NOTICE(p_hwfn, true,
2875 "TLVs have overrun the buffer size\n");
2878 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2884 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2885 struct ecore_sp_vport_update_params *p_data,
2886 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2888 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2889 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2891 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2892 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2896 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2897 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2898 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2899 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2900 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2904 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2905 struct ecore_sp_vport_update_params *p_data,
2906 struct ecore_vf_info *p_vf,
2907 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2909 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2910 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2912 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2913 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2917 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2919 /* Ignore the VF request if we're forcing a vlan */
2920 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2921 p_data->update_inner_vlan_removal_flg = 1;
2922 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2925 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2929 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2930 struct ecore_sp_vport_update_params *p_data,
2931 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2933 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2934 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2936 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2937 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2938 if (!p_tx_switch_tlv)
2942 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2943 DP_NOTICE(p_hwfn, false,
2944 "FPGA: Ignore tx-switching configuration originating"
2950 p_data->update_tx_switching_flg = 1;
2951 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2952 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2956 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2957 struct ecore_sp_vport_update_params *p_data,
2958 struct ecore_iov_vf_mbx *p_mbx,
2961 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2962 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2964 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2965 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2969 p_data->update_approx_mcast_flg = 1;
2970 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2971 sizeof(unsigned long) *
2972 ETH_MULTICAST_MAC_BINS_IN_REGS);
2973 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2977 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2978 struct ecore_sp_vport_update_params *p_data,
2979 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2981 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2982 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2983 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2985 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2986 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2990 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2991 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2992 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2993 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2994 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2998 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2999 struct ecore_sp_vport_update_params *p_data,
3000 struct ecore_iov_vf_mbx *p_mbx,
3003 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3004 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3006 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3007 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3008 if (!p_accept_any_vlan)
3011 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3012 p_data->update_accept_any_vlan_flg =
3013 p_accept_any_vlan->update_accept_any_vlan_flg;
3014 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3018 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3019 struct ecore_vf_info *vf,
3020 struct ecore_sp_vport_update_params *p_data,
3021 struct ecore_rss_params *p_rss,
3022 struct ecore_iov_vf_mbx *p_mbx,
3023 u16 *tlvs_mask, u16 *tlvs_accepted)
3025 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3026 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3027 bool b_reject = false;
3031 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3032 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3034 p_data->rss_params = OSAL_NULL;
3038 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3040 p_rss->update_rss_config =
3041 !!(p_rss_tlv->update_rss_flags &
3042 VFPF_UPDATE_RSS_CONFIG_FLAG);
3043 p_rss->update_rss_capabilities =
3044 !!(p_rss_tlv->update_rss_flags &
3045 VFPF_UPDATE_RSS_CAPS_FLAG);
3046 p_rss->update_rss_ind_table =
3047 !!(p_rss_tlv->update_rss_flags &
3048 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3049 p_rss->update_rss_key =
3050 !!(p_rss_tlv->update_rss_flags &
3051 VFPF_UPDATE_RSS_KEY_FLAG);
3053 p_rss->rss_enable = p_rss_tlv->rss_enable;
3054 p_rss->rss_eng_id = vf->rss_eng_id;
3055 p_rss->rss_caps = p_rss_tlv->rss_caps;
3056 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3057 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3058 sizeof(p_rss->rss_key));
3060 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3061 (1 << p_rss_tlv->rss_table_size_log));
3063 for (i = 0; i < table_size; i++) {
3064 struct ecore_queue_cid *p_cid;
3066 q_idx = p_rss_tlv->rss_ind_table[i];
3067 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3068 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3069 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3070 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3071 vf->relative_vf_id, q_idx);
3076 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3077 p_rss->rss_ind_table[i] = p_cid;
3080 p_data->rss_params = p_rss;
3082 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3084 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3088 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3089 struct ecore_sp_vport_update_params *p_data,
3090 struct ecore_sge_tpa_params *p_sge_tpa,
3091 struct ecore_iov_vf_mbx *p_mbx,
3094 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3095 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3097 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3098 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3100 if (!p_sge_tpa_tlv) {
3101 p_data->sge_tpa_params = OSAL_NULL;
3105 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3107 p_sge_tpa->update_tpa_en_flg =
3108 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3109 p_sge_tpa->update_tpa_param_flg =
3110 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3111 VFPF_UPDATE_TPA_PARAM_FLAG);
3113 p_sge_tpa->tpa_ipv4_en_flg =
3114 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3115 p_sge_tpa->tpa_ipv6_en_flg =
3116 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3117 p_sge_tpa->tpa_pkt_split_flg =
3118 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3119 p_sge_tpa->tpa_hdr_data_split_flg =
3120 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3121 p_sge_tpa->tpa_gro_consistent_flg =
3122 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3124 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3125 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3126 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3127 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3128 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3130 p_data->sge_tpa_params = p_sge_tpa;
3132 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
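/* Handle the VPORT_UPDATE mailbox request: parse every extended TLV the VF
 * supplied into a single ecore_sp_vport_update_params, let the upper layer
 * accept or reject individual TLVs, and send one vport-update ramrod
 * covering everything that was accepted.
 */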
3135 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3136 struct ecore_ptt *p_ptt,
3137 struct ecore_vf_info *vf)
3139 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3140 struct ecore_sp_vport_update_params params;
3141 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3142 struct ecore_sge_tpa_params sge_tpa_params;
3143 u16 tlvs_mask = 0, tlvs_accepted = 0;
3144 u8 status = PFVF_STATUS_SUCCESS;
3146 enum _ecore_status_t rc;
3148 /* Validate that the VF has an active vport instance */
3149 if (!vf->vport_instance) {
3150 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3151 "No VPORT instance available for VF[%d],"
3152 " failing vport update\n",
3154 status = PFVF_STATUS_FAILURE;
3158 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3159 if (p_rss_params == OSAL_NULL) {
3160 status = PFVF_STATUS_FAILURE;
3164 OSAL_MEMSET(&params, 0, sizeof(params));
3165 params.opaque_fid = vf->opaque_fid;
3166 params.vport_id = vf->vport_id;
3167 params.rss_params = OSAL_NULL;
3169 /* Search for extended tlvs list and update values
3170 * from VF in struct ecore_sp_vport_update_params.
3172 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3173 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3174 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3175 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3176 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3177 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3178 ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
3179 &sge_tpa_params, mbx, &tlvs_mask);
3181 tlvs_accepted = tlvs_mask;
3183 /* Some of the extended TLVs need to be validated first; In that case,
3184 * they can update the mask without updating the accepted [so that
3185 * PF could communicate to VF it has rejected request].
3187 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3188 mbx, &tlvs_mask, &tlvs_accepted);
3190 /* Just log a message if no extended TLV is present in the buffer.
3191 * Once every vport-update ramrod feature is requested by the VF
3192 * as an extended TLV, an error can be returned in the response
3193 * when no extended TLV is present in the buffer.
3195 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3196 &params, &tlvs_accepted) !=
3199 status = PFVF_STATUS_NOT_SUPPORTED;
3203 if (!tlvs_accepted) {
3205 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3206 "Upper-layer prevents said VF"
3207 " configuration\n");
3209 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3210 "No feature tlvs found for vport update\n");
3211 status = PFVF_STATUS_NOT_SUPPORTED;
3215 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3219 status = PFVF_STATUS_FAILURE;
3222 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3223 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3224 tlvs_mask, tlvs_accepted);
3225 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
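/* Track the VF's requested VLAN filters in the PF's shadow configuration so
 * they can be replayed later (e.g. once a forced VLAN is removed). Removals
 * are always honored; additions are skipped while a VLAN is forced.
 */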
3228 static enum _ecore_status_t
3229 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3230 struct ecore_vf_info *p_vf,
3231 struct ecore_filter_ucast *p_params)
3235 /* First remove entries and then add new ones */
3236 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3237 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3238 if (p_vf->shadow_config.vlans[i].used &&
3239 p_vf->shadow_config.vlans[i].vid ==
3241 p_vf->shadow_config.vlans[i].used = false;
3244 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3245 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3246 "VF [%d] - Tries to remove a non-existing"
3248 p_vf->relative_vf_id);
3251 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3252 p_params->opcode == ECORE_FILTER_FLUSH) {
3253 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3254 p_vf->shadow_config.vlans[i].used = false;
3257 /* In forced mode, we're willing to remove entries - but we don't add
3260 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3261 return ECORE_SUCCESS;
3263 if (p_params->opcode == ECORE_FILTER_ADD ||
3264 p_params->opcode == ECORE_FILTER_REPLACE) {
3265 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3266 if (p_vf->shadow_config.vlans[i].used)
3269 p_vf->shadow_config.vlans[i].used = true;
3270 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3274 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3275 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3276 "VF [%d] - Tries to configure more than %d"
3278 p_vf->relative_vf_id,
3279 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3284 return ECORE_SUCCESS;
3287 static enum _ecore_status_t
3288 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3289 struct ecore_vf_info *p_vf,
3290 struct ecore_filter_ucast *p_params)
3292 char empty_mac[ETH_ALEN];
3295 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3297 /* If we're in forced-mode, we don't allow any change */
3298 /* TODO - this would change if we were ever to implement logic for
3299 * removing a forced MAC altogether [in which case, like for vlans,
3300 * we should be able to re-trace previous configuration.
3302 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3303 return ECORE_SUCCESS;
3305 /* First remove entries and then add new ones */
3306 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3307 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3308 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3309 p_params->mac, ETH_ALEN)) {
3310 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3316 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3317 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3318 "MAC isn't configured\n");
3321 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3322 p_params->opcode == ECORE_FILTER_FLUSH) {
3323 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3324 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3327 /* List the new MAC address */
3328 if (p_params->opcode != ECORE_FILTER_ADD &&
3329 p_params->opcode != ECORE_FILTER_REPLACE)
3330 return ECORE_SUCCESS;
3332 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3333 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3334 empty_mac, ETH_ALEN)) {
3335 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3336 p_params->mac, ETH_ALEN);
3337 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3338 "Added MAC at %d entry in shadow\n", i);
3343 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3344 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3345 "No available place for MAC\n");
3349 return ECORE_SUCCESS;
3352 static enum _ecore_status_t
3353 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3354 struct ecore_vf_info *p_vf,
3355 struct ecore_filter_ucast *p_params)
3357 enum _ecore_status_t rc = ECORE_SUCCESS;
3359 if (p_params->type == ECORE_FILTER_MAC) {
3360 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3361 if (rc != ECORE_SUCCESS)
3365 if (p_params->type == ECORE_FILTER_VLAN)
3366 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
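/* Handle the UCAST_FILTER mailbox request: update the VF's shadow MAC/VLAN
 * configuration, reject requests that conflict with a forced MAC/VLAN from
 * the bulletin, and send the unicast filter ramrod for accepted requests.
 */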
3371 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3372 struct ecore_ptt *p_ptt,
3373 struct ecore_vf_info *vf)
3375 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3376 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3377 struct vfpf_ucast_filter_tlv *req;
3378 u8 status = PFVF_STATUS_SUCCESS;
3379 struct ecore_filter_ucast params;
3380 enum _ecore_status_t rc;
3382 /* Prepare the unicast filter params */
3383 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3384 req = &mbx->req_virt->ucast_filter;
3385 params.opcode = (enum ecore_filter_opcode)req->opcode;
3386 params.type = (enum ecore_filter_ucast_type)req->type;
3388 /* @@@TBD - We might need logic on HV side in determining this */
3389 params.is_rx_filter = 1;
3390 params.is_tx_filter = 1;
3391 params.vport_to_remove_from = vf->vport_id;
3392 params.vport_to_add_to = vf->vport_id;
3393 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3394 params.vlan = req->vlan;
3396 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3397 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3398 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3399 vf->abs_vf_id, params.opcode, params.type,
3400 params.is_rx_filter ? "RX" : "",
3401 params.is_tx_filter ? "TX" : "",
3402 params.vport_to_add_to,
3403 params.mac[0], params.mac[1], params.mac[2],
3404 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3406 if (!vf->vport_instance) {
3407 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3408 "No VPORT instance available for VF[%d],"
3409 " failing ucast MAC configuration\n",
3411 status = PFVF_STATUS_FAILURE;
3415 /* Update shadow copy of the VF configuration */
3416 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3418 status = PFVF_STATUS_FAILURE;
3422 /* Determine if the unicast filtering is acceptable to the PF */
3423 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3424 (params.type == ECORE_FILTER_VLAN ||
3425 params.type == ECORE_FILTER_MAC_VLAN)) {
3426 /* Once VLAN is forced or PVID is set, do not allow
3427 * to add/replace any further VLANs.
3429 if (params.opcode == ECORE_FILTER_ADD ||
3430 params.opcode == ECORE_FILTER_REPLACE)
3431 status = PFVF_STATUS_FORCED;
3435 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3436 (params.type == ECORE_FILTER_MAC ||
3437 params.type == ECORE_FILTER_MAC_VLAN)) {
3438 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3439 (params.opcode != ECORE_FILTER_ADD &&
3440 params.opcode != ECORE_FILTER_REPLACE))
3441 status = PFVF_STATUS_FORCED;
3445 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3446 if (rc == ECORE_EXISTS) {
3448 } else if (rc == ECORE_INVAL) {
3449 status = PFVF_STATUS_FAILURE;
3453 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3454 ECORE_SPQ_MODE_CB, OSAL_NULL);
3456 status = PFVF_STATUS_FAILURE;
3459 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3460 sizeof(struct pfvf_def_resp_tlv), status);
3463 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3464 struct ecore_ptt *p_ptt,
3465 struct ecore_vf_info *vf)
3470 for (i = 0; i < vf->num_sbs; i++)
3471 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3473 vf->opaque_fid, false);
3475 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3476 sizeof(struct pfvf_def_resp_tlv),
3477 PFVF_STATUS_SUCCESS);
3480 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3481 struct ecore_ptt *p_ptt,
3482 struct ecore_vf_info *vf)
3484 u16 length = sizeof(struct pfvf_def_resp_tlv);
3485 u8 status = PFVF_STATUS_SUCCESS;
3487 /* Disable Interrupts for VF */
3488 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3490 /* Reset Permission table */
3491 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3493 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3497 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3498 struct ecore_ptt *p_ptt,
3499 struct ecore_vf_info *p_vf)
3501 u16 length = sizeof(struct pfvf_def_resp_tlv);
3502 u8 status = PFVF_STATUS_SUCCESS;
3503 enum _ecore_status_t rc = ECORE_SUCCESS;
3505 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3507 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3508 /* Stopping the VF */
3509 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3512 if (rc != ECORE_SUCCESS) {
3513 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3515 status = PFVF_STATUS_FAILURE;
3518 p_vf->state = VF_STOPPED;
3521 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3525 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3526 struct ecore_ptt *p_ptt,
3527 struct ecore_vf_info *p_vf)
3529 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3530 struct pfvf_read_coal_resp_tlv *p_resp;
3531 struct vfpf_read_coal_req_tlv *req;
3532 u8 status = PFVF_STATUS_FAILURE;
3533 struct ecore_vf_queue *p_queue;
3534 struct ecore_queue_cid *p_cid;
3535 enum _ecore_status_t rc = ECORE_SUCCESS;
3536 u16 coal = 0, qid, i;
3539 mbx->offset = (u8 *)mbx->reply_virt;
3540 req = &mbx->req_virt->read_coal_req;
3543 b_is_rx = req->is_rx ? true : false;
3546 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3547 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3548 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3549 "VF[%d]: Invalid Rx queue_id = %d\n",
3550 p_vf->abs_vf_id, qid);
3554 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3555 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3556 if (rc != ECORE_SUCCESS)
3559 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3560 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3561 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3562 "VF[%d]: Invalid Tx queue_id = %d\n",
3563 p_vf->abs_vf_id, qid);
3566 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3567 p_queue = &p_vf->vf_queues[qid];
3568 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3569 (!p_queue->cids[i].b_is_tx))
3572 p_cid = p_queue->cids[i].p_cid;
3574 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3576 if (rc != ECORE_SUCCESS)
3582 status = PFVF_STATUS_SUCCESS;
3585 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3587 p_resp->coal = coal;
3589 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3590 sizeof(struct channel_list_end_tlv));
3592 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3595 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3596 struct ecore_ptt *p_ptt,
3597 struct ecore_vf_info *vf)
3599 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3600 enum _ecore_status_t rc = ECORE_SUCCESS;
3601 struct vfpf_update_coalesce *req;
3602 u8 status = PFVF_STATUS_FAILURE;
3603 struct ecore_queue_cid *p_cid;
3604 u16 rx_coal, tx_coal;
3608 req = &mbx->req_virt->update_coalesce;
3610 rx_coal = req->rx_coal;
3611 tx_coal = req->tx_coal;
3614 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3615 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3617 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3618 vf->abs_vf_id, qid);
3622 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3623 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3625 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3626 vf->abs_vf_id, qid);
3630 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3631 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3632 vf->abs_vf_id, rx_coal, tx_coal, qid);
3635 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3637 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3638 if (rc != ECORE_SUCCESS) {
3639 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3640 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3641 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3644 vf->rx_coal = rx_coal;
3647 /* TODO - in future, it might be possible to pass this in a per-cid
3648 * granularity. For now, do this for all Tx queues.
3651 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3653 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3654 if (p_queue->cids[i].p_cid == OSAL_NULL)
3657 if (!p_queue->cids[i].b_is_tx)
3660 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3661 p_queue->cids[i].p_cid);
3662 if (rc != ECORE_SUCCESS) {
3663 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3664 "VF[%d]: Unable to set tx queue coalesce\n",
3669 vf->tx_coal = tx_coal;
3672 status = PFVF_STATUS_SUCCESS;
3674 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3675 sizeof(struct pfvf_def_resp_tlv), status);
3678 enum _ecore_status_t
3679 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3680 u16 rx_coal, u16 tx_coal,
3683 struct ecore_queue_cid *p_cid;
3684 struct ecore_vf_info *vf;
3685 struct ecore_ptt *p_ptt;
3688 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3689 DP_NOTICE(p_hwfn, true,
3690 "VF[%d] - Can not set coalescing: VF is not active\n",
3695 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3696 p_ptt = ecore_ptt_acquire(p_hwfn);
3700 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3701 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3703 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3704 vf->abs_vf_id, qid);
3708 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3709 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3711 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3712 vf->abs_vf_id, qid);
3716 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3717 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3718 vf->abs_vf_id, rx_coal, tx_coal, qid);
3721 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3723 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3724 if (rc != ECORE_SUCCESS) {
3725 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3726 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3727 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3730 vf->rx_coal = rx_coal;
3733 /* TODO - in future, it might be possible to pass this in a per-cid
3734 * granularity. For now, do this for all Tx queues.
3737 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3739 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3740 if (p_queue->cids[i].p_cid == OSAL_NULL)
3743 if (!p_queue->cids[i].b_is_tx)
3746 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3747 p_queue->cids[i].p_cid);
3748 if (rc != ECORE_SUCCESS) {
3749 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3750 "VF[%d]: Unable to set tx queue coalesce\n",
3755 vf->tx_coal = tx_coal;
3759 ecore_ptt_release(p_hwfn, p_ptt);
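/* FLR cleanup helper: pretend to the VF's concrete FID and poll the DORQ
 * usage counter until it drains (or a timeout expires) before restoring
 * the PF's FID.
 */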
3764 static enum _ecore_status_t
3765 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3766 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3771 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3773 for (cnt = 0; cnt < 50; cnt++) {
3774 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3779 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3783 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3784 p_vf->abs_vf_id, val);
3785 return ECORE_TIMEOUT;
3788 return ECORE_SUCCESS;
3791 static enum _ecore_status_t
3792 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3793 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3795 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3798 /* Read initial consumers & producers */
3799 for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3802 cons[i] = ecore_rd(p_hwfn, p_ptt,
3803 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3805 prod = ecore_rd(p_hwfn, p_ptt,
3806 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3808 distance[i] = prod - cons[i];
3811 /* Wait for consumers to pass the producers */
3813 for (cnt = 0; cnt < 50; cnt++) {
3814 for (; i < MAX_NUM_VOQS_E4; i++) {
3817 tmp = ecore_rd(p_hwfn, p_ptt,
3818 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3820 if (distance[i] > tmp - cons[i])
3824 if (i == MAX_NUM_VOQS_E4)
3831 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3832 p_vf->abs_vf_id, i);
3833 return ECORE_TIMEOUT;
3836 return ECORE_SUCCESS;
3839 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3840 struct ecore_vf_info *p_vf,
3841 struct ecore_ptt *p_ptt)
3843 enum _ecore_status_t rc;
3845 /* TODO - add SRC and TM polling once we add storage IOV */
3847 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3851 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3855 return ECORE_SUCCESS;
3858 static enum _ecore_status_t
3859 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3860 struct ecore_ptt *p_ptt,
3861 u16 rel_vf_id, u32 *ack_vfs)
3863 struct ecore_vf_info *p_vf;
3864 enum _ecore_status_t rc = ECORE_SUCCESS;
3866 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3868 return ECORE_SUCCESS;
3870 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3871 (1ULL << (rel_vf_id % 64))) {
3872 u16 vfid = p_vf->abs_vf_id;
3874 /* TODO - should we lock channel? */
3876 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3877 "VF[%d] - Handling FLR\n", vfid);
3879 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3881 /* If VF isn't active, no need for anything but SW */
3885 /* TODO - what to do in case of failure? */
3886 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3887 if (rc != ECORE_SUCCESS)
3890 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3892 /* TODO - what's now? What a mess.... */
3893 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
3897 /* Workaround to make VF-PF channel ready, as FW
3898 * doesn't do that as a part of FLR.
3901 GTT_BAR0_MAP_REG_USDM_RAM +
3902 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3904 /* VF_STOPPED has to be set only after final cleanup
3905 * but prior to re-enabling the VF.
3907 p_vf->state = VF_STOPPED;
3909 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3911 /* TODO - again, a mess... */
3912 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3917 /* Mark VF for ack and clean pending state */
3918 if (p_vf->state == VF_RESET)
3919 p_vf->state = VF_STOPPED;
3920 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3921 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3922 ~(1ULL << (rel_vf_id % 64));
3923 p_vf->vf_mbx.b_pending_msg = false;
3929 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3930 struct ecore_ptt *p_ptt)
3932 u32 ack_vfs[VF_MAX_STATIC / 32];
3933 enum _ecore_status_t rc = ECORE_SUCCESS;
3936 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3938 /* Since BRB <-> PRS interface can't be tested as part of the flr
3939 * polling due to HW limitations, simply sleep a bit. And since
3940 * there's no need to wait per-vf, do it before looping.
3944 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3945 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3947 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3951 enum _ecore_status_t
3952 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3953 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3955 u32 ack_vfs[VF_MAX_STATIC / 32];
3956 enum _ecore_status_t rc = ECORE_SUCCESS;
3958 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3960 /* Wait instead of polling the BRB <-> PRS interface */
3963 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3965 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3969 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3974 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3975 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3976 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3977 "[%08x,...,%08x]: %08x\n",
3978 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3980 if (!p_hwfn->p_dev->p_iov_info) {
3981 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3986 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3987 struct ecore_vf_info *p_vf;
3990 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3994 vfid = p_vf->abs_vf_id;
3995 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3996 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3997 u16 rel_vf_id = p_vf->relative_vf_id;
3999 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4000 "VF[%d] [rel %d] got FLR-ed\n",
4003 p_vf->state = VF_RESET;
4005 /* No need to lock here, since pending_flr should
4006 * only change here and before ACKing the MFW. Since
4007 * the MFW will not trigger an additional attention for
4008 * VF FLR until the ACK, we're safe.
4010 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
4018 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
4020 struct ecore_mcp_link_params *p_params,
4021 struct ecore_mcp_link_state *p_link,
4022 struct ecore_mcp_link_capabilities *p_caps)
4024 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
4025 struct ecore_bulletin_content *p_bulletin;
4030 p_bulletin = p_vf->bulletin.p_virt;
4033 __ecore_vf_get_link_params(p_params, p_bulletin);
4035 __ecore_vf_get_link_state(p_link, p_bulletin);
4037 __ecore_vf_get_link_caps(p_caps, p_bulletin);
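/* Main VF->PF mailbox dispatcher: copies the first TLV of the pending
 * request and routes it to the matching handler. Messages from VFs marked
 * malicious are answered with PFVF_STATUS_MALICIOUS, and unknown TLVs get
 * a NOT_SUPPORTED reply when a valid reply address is available.
 */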
4040 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
4041 struct ecore_ptt *p_ptt, int vfid)
4043 struct ecore_iov_vf_mbx *mbx;
4044 struct ecore_vf_info *p_vf;
4046 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4050 mbx = &p_vf->vf_mbx;
4052 /* ecore_iov_process_mbx_request */
4053 #ifndef CONFIG_ECORE_SW_CHANNEL
4054 if (!mbx->b_pending_msg) {
4055 DP_NOTICE(p_hwfn, true,
4056 "VF[%02x]: Trying to process mailbox message when none is pending\n",
4060 mbx->b_pending_msg = false;
4063 mbx->first_tlv = mbx->req_virt->first_tlv;
4065 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4066 "VF[%02x]: Processing mailbox message [type %04x]\n",
4067 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4069 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
4070 p_vf->relative_vf_id,
4071 mbx->first_tlv.tl.type);
4073 /* Lock the per vf op mutex and note the locker's identity.
4074 * The unlock will take place in mbx response.
4076 ecore_iov_lock_vf_pf_channel(p_hwfn,
4077 p_vf, mbx->first_tlv.tl.type);
4079 /* check if tlv type is known */
4080 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
4081 !p_vf->b_malicious) {
4082 /* switch on the opcode */
4083 switch (mbx->first_tlv.tl.type) {
4084 case CHANNEL_TLV_ACQUIRE:
4085 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
4087 case CHANNEL_TLV_VPORT_START:
4088 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
4090 case CHANNEL_TLV_VPORT_TEARDOWN:
4091 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
4093 case CHANNEL_TLV_START_RXQ:
4094 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
4096 case CHANNEL_TLV_START_TXQ:
4097 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
4099 case CHANNEL_TLV_STOP_RXQS:
4100 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
4102 case CHANNEL_TLV_STOP_TXQS:
4103 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
4105 case CHANNEL_TLV_UPDATE_RXQ:
4106 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
4108 case CHANNEL_TLV_VPORT_UPDATE:
4109 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
4111 case CHANNEL_TLV_UCAST_FILTER:
4112 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
4114 case CHANNEL_TLV_CLOSE:
4115 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
4117 case CHANNEL_TLV_INT_CLEANUP:
4118 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
4120 case CHANNEL_TLV_RELEASE:
4121 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
4123 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
4124 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
4126 case CHANNEL_TLV_COALESCE_UPDATE:
4127 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
4129 case CHANNEL_TLV_COALESCE_READ:
4130 ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
4133 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
4134 /* If we've received a message from a VF we consider malicious
4135 * we ignore the message unless it's one for RELEASE, in which
4136 * case we'll let it have the benefit of the doubt, allowing the
4137 * next loaded driver to start again.
4139 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
4140 /* TODO - initiate FLR, remove malicious indication */
4141 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4142 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
4145 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4146 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
4147 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4150 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4151 mbx->first_tlv.tl.type,
4152 sizeof(struct pfvf_def_resp_tlv),
4153 PFVF_STATUS_MALICIOUS);
4155 /* unknown TLV - this may belong to a VF driver from the future,
4156 * i.e., a version written after this PF driver, supporting features
4157 * this PF does not yet know about and therefore cannot service.
4158 * Alternatively, a broken VF driver may simply be sending garbage
4159 * over the channel.
4161 DP_NOTICE(p_hwfn, false,
4162 "VF[%02x]: unknown TLV. type %04x length %04x"
4163 " padding %08x reply address %lu\n",
4165 mbx->first_tlv.tl.type,
4166 mbx->first_tlv.tl.length,
4167 mbx->first_tlv.padding,
4168 (unsigned long)mbx->first_tlv.reply_address);
4170 /* Try replying in case the reply address matches the one posted at acquisition.
4173 if (p_vf->acquire.first_tlv.reply_address &&
4174 (mbx->first_tlv.reply_address ==
4175 p_vf->acquire.first_tlv.reply_address))
4176 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4177 mbx->first_tlv.tl.type,
4178 sizeof(struct pfvf_def_resp_tlv),
4179 PFVF_STATUS_NOT_SUPPORTED);
4181 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4182 "VF[%02x]: Can't respond to TLV -"
4183 " no valid reply address\n",
4187 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
4188 mbx->first_tlv.tl.type);
4190 #ifdef CONFIG_ECORE_SW_CHANNEL
4191 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
4192 mbx->sw_mbx.response_offset = 0;
4196 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
4201 OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
4203 ecore_for_each_vf(p_hwfn, i) {
4204 struct ecore_vf_info *p_vf;
4206 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
4207 if (p_vf->vf_mbx.b_pending_msg)
4208 events[i / 64] |= 1ULL << (i % 64);
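/* Illustrative sketch only (not part of this file): one way a PF-side
 * service routine could drive the mailbox path using the pending-events
 * bitmap filled above. It assumes a valid p_hwfn/p_ptt pair; everything
 * other than the ecore_iov_* helpers defined in this file is hypothetical.
 *
 *	u64 events[ECORE_VF_ARRAY_LENGTH];
 *	u16 i;
 *
 *	ecore_iov_pf_get_pending_events(p_hwfn, events);
 *	ecore_for_each_vf(p_hwfn, i) {
 *		if (!(events[i / 64] & (1ULL << (i % 64))))
 *			continue;
 *
 *		ecore_iov_copy_vf_msg(p_hwfn, p_ptt, (int)i);
 *		ecore_iov_process_mbx_req(p_hwfn, p_ptt, (int)i);
 *	}
 */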
4212 static struct ecore_vf_info *
4213 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
4215 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
4217 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4218 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4219 "Got indication for VF [abs 0x%08x] that cannot be"
4225 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
4228 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
4230 struct regpair *vf_msg)
4232 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
4236 return ECORE_SUCCESS;
4238 /* Record the physical address of the request so that the handler
4239 * can later copy the message from it.
4241 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
4243 p_vf->vf_mbx.b_pending_msg = true;
4245 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
4248 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
4249 struct malicious_vf_eqe_data *p_data)
4251 struct ecore_vf_info *p_vf;
4253 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4258 if (!p_vf->b_malicious) {
4259 DP_NOTICE(p_hwfn, false,
4260 "VF [%d] - Malicious behavior [%02x]\n",
4261 p_vf->abs_vf_id, p_data->err_id);
4263 p_vf->b_malicious = true;
4266 "VF [%d] - Malicious behavior [%02x]\n",
4267 p_vf->abs_vf_id, p_data->err_id);
4270 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
4273 static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
4276 union event_ring_data *data,
4277 u8 OSAL_UNUSED fw_return_code)
4280 case COMMON_EVENT_VF_PF_CHANNEL:
4281 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
4282 &data->vf_pf_channel.msg_addr);
4283 case COMMON_EVENT_VF_FLR:
4284 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4285 "VF-FLR is still not supported\n");
4286 return ECORE_SUCCESS;
4287 case COMMON_EVENT_MALICIOUS_VF:
4288 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4289 return ECORE_SUCCESS;
4291 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
4297 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4299 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4300 (1ULL << (rel_vf_id % 64)));
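/* Illustrative sketch only: scanning for VFs with a pending FLR using the
 * helper above. pending_flr is an array of 64-bit words, so rel_vf_id / 64
 * selects the word and rel_vf_id % 64 the bit. The loop below is a
 * hypothetical caller, not driver code.
 *
 *	u16 i;
 *
 *	ecore_for_each_vf(p_hwfn, i)
 *		if (ecore_iov_is_vf_pending_flr(p_hwfn, i))
 *			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 *				   "VF[%04x] has a pending FLR\n", i);
 */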
4303 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4305 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
4311 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4312 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
4316 return MAX_NUM_VFS_E4;
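/* Illustrative sketch only: the iteration idiom this helper enables - start
 * from relative VF 0 and keep asking for the next active VF until the
 * MAX_NUM_VFS_E4 sentinel comes back. The ecore_for_each_vf() macro used
 * elsewhere in this file presumably wraps this same pattern.
 *
 *	u16 i;
 *
 *	for (i = ecore_iov_get_next_active_vf(p_hwfn, 0);
 *	     i < MAX_NUM_VFS_E4;
 *	     i = ecore_iov_get_next_active_vf(p_hwfn, i + 1)) {
 *		... operate on active VF 'i' ...
 *	}
 */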
4319 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
4320 struct ecore_ptt *ptt, int vfid)
4322 struct ecore_dmae_params params;
4323 struct ecore_vf_info *vf_info;
4325 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4329 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params));
4330 params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
4331 params.src_vfid = vf_info->abs_vf_id;
4333 if (ecore_dmae_host2host(p_hwfn, ptt,
4334 vf_info->vf_mbx.pending_req,
4335 vf_info->vf_mbx.req_phys,
4336 sizeof(union vfpf_tlvs) / 4, &params)) {
4337 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4338 "Failed to copy message from VF 0x%02x\n", vfid);
4343 return ECORE_SUCCESS;
4346 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
4349 struct ecore_vf_info *vf_info;
4352 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4354 DP_NOTICE(p_hwfn->p_dev, true,
4355 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4358 if (vf_info->b_malicious) {
4359 DP_NOTICE(p_hwfn->p_dev, false,
4360 "Can't set forced MAC to malicious VF [%d]\n",
4365 feature = 1 << MAC_ADDR_FORCED;
4366 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4368 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4369 /* Forced MAC will disable MAC_ADDR */
4370 vf_info->bulletin.p_virt->valid_bitmap &=
4371 ~(1 << VFPF_BULLETIN_MAC_ADDR);
4373 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
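/* Illustrative sketch only: forcing a MAC via the bulletin and reading it
 * back. The address below is a made-up example, and the (mac, vfid)
 * argument order is assumed from the function body above.
 *
 *	u8 mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };
 *	u8 *p_forced;
 *
 *	ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, vfid);
 *	p_forced = ecore_iov_bulletin_get_forced_mac(p_hwfn, (u16)vfid);
 *	... p_forced, when non-NULL, points at the bulletin copy of mac;
 *	    the VF picks it up on its next bulletin read ...
 */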
4376 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
4379 struct ecore_vf_info *vf_info;
4382 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4384 DP_NOTICE(p_hwfn->p_dev, true,
4385 "Can not set MAC, invalid vfid [%d]\n", vfid);
4388 if (vf_info->b_malicious) {
4389 DP_NOTICE(p_hwfn->p_dev, false,
4390 "Can't set MAC to malicious VF [%d]\n",
4395 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
4396 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4397 "Can not set MAC, Forced MAC is configured\n");
4401 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4402 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4404 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4406 return ECORE_SUCCESS;
4409 enum _ecore_status_t
4410 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
4411 bool b_untagged_only, int vfid)
4413 struct ecore_vf_info *vf_info;
4416 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4418 DP_NOTICE(p_hwfn->p_dev, true,
4419 "Can not set untagged default, invalid vfid [%d]\n",
4423 if (vf_info->b_malicious) {
4424 DP_NOTICE(p_hwfn->p_dev, false,
4425 "Can't set untagged default to malicious VF [%d]\n",
4430 /* Since this is configurable only during vport-start, don't accept it
4431 * once we're past that point.
4433 if (vf_info->state == VF_ENABLED) {
4434 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4435 "Can't support untagged change for vfid[%d] -"
4436 " VF is already active\n",
4441 /* Set the configuration; it will later be taken into account during
4442 * VF initialization.
4444 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
4445 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
4446 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4448 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
4451 return ECORE_SUCCESS;
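/* Illustrative sketch only: since the untagged-default setting is rejected
 * once the VF is enabled, a hypothetical caller would apply it while the VF
 * is still being provisioned, e.g.:
 *
 *	if (!ecore_iov_is_vf_initialized(p_hwfn, (u16)vfid))
 *		rc = ecore_iov_bulletin_set_forced_untagged_default(p_hwfn,
 *								    true,
 *								    vfid);
 */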
4454 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
4457 struct ecore_vf_info *vf_info;
4459 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4463 *opaque_fid = vf_info->opaque_fid;
4466 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
4469 struct ecore_vf_info *vf_info;
4472 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4474 DP_NOTICE(p_hwfn->p_dev, true,
4475 "Can not set forced MAC, invalid vfid [%d]\n",
4479 if (vf_info->b_malicious) {
4480 DP_NOTICE(p_hwfn->p_dev, false,
4481 "Can't set forced vlan to malicious VF [%d]\n",
4486 feature = 1 << VLAN_ADDR_FORCED;
4487 vf_info->bulletin.p_virt->pvid = pvid;
4489 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4491 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4493 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
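/* Illustrative sketch only: a non-zero pvid forces the VLAN (sets the
 * VLAN_ADDR_FORCED feature), while pvid 0 clears it again. The (pvid, vfid)
 * argument order is assumed from the function body above.
 *
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 100, vfid);   force PVID 100
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 0, vfid);     remove forcing
 */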
4496 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
4497 int vfid, u16 vxlan_port, u16 geneve_port)
4499 struct ecore_vf_info *vf_info;
4501 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4503 DP_NOTICE(p_hwfn->p_dev, true,
4504 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4508 if (vf_info->b_malicious) {
4509 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4510 "Can not set udp ports to malicious VF [%d]\n",
4515 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4516 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
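/* Illustrative sketch only: publishing the IANA default tunnel ports so the
 * VF can pick them up from its bulletin board.
 *
 *	ecore_iov_bulletin_set_udp_ports(p_hwfn, vfid, 4789, 6081);
 *	(4789 = VXLAN, 6081 = GENEVE)
 */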
4519 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
4521 struct ecore_vf_info *p_vf_info;
4523 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4527 return !!p_vf_info->vport_instance;
4530 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
4532 struct ecore_vf_info *p_vf_info;
4534 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4538 return p_vf_info->state == VF_STOPPED;
4541 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
4543 struct ecore_vf_info *vf_info;
4545 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4549 return vf_info->spoof_chk;
4552 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
4555 struct ecore_vf_info *vf;
4556 enum _ecore_status_t rc = ECORE_INVAL;
4558 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4559 DP_NOTICE(p_hwfn, true,
4560 "SR-IOV sanity check failed, can't set spoofchk\n");
4564 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4568 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4569 /* After the VF VPORT is started, the PF will configure spoof check */
4570 vf->req_spoofchk_val = val;
4575 rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
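/* Illustrative sketch only: requesting spoof-checking for a VF. If the VF
 * has no vport yet, the request is only latched in req_spoofchk_val and is
 * applied later, so ecore_iov_spoofchk_get() may still report the old value
 * until then. The (vfid, val) argument order is assumed.
 *
 *	if (ecore_iov_spoofchk_set(p_hwfn, vfid, true) != ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 *			   "spoofchk request for VF %d not applied\n", vfid);
 */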
4581 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4583 u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4585 max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4586 : ECORE_MAX_VF_CHAINS_PER_PF;
4588 return max_chains_per_vf;
4591 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4593 void **pp_req_virt_addr,
4594 u16 *p_req_virt_size)
4596 struct ecore_vf_info *vf_info =
4597 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4602 if (pp_req_virt_addr)
4603 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4605 if (p_req_virt_size)
4606 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4609 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4611 void **pp_reply_virt_addr,
4612 u16 *p_reply_virt_size)
4614 struct ecore_vf_info *vf_info =
4615 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4620 if (pp_reply_virt_addr)
4621 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4623 if (p_reply_virt_size)
4624 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4627 #ifdef CONFIG_ECORE_SW_CHANNEL
4628 struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4631 struct ecore_vf_info *vf_info =
4632 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4637 return &vf_info->vf_mbx.sw_mbx;
4641 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4643 return (length >= sizeof(struct vfpf_first_tlv) &&
4644 (length <= sizeof(union vfpf_tlvs)));
4647 u32 ecore_iov_pfvf_msg_length(void)
4649 return sizeof(union pfvf_tlvs);
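/* Illustrative sketch only: a hypothetical software-channel consumer would
 * bound-check an incoming request with the helper above before touching it,
 * and size its reply buffer with ecore_iov_pfvf_msg_length().
 *
 *	if (!ecore_iov_is_valid_vfpf_msg_length(length))
 *		return ECORE_INVAL;
 *	reply_size = ecore_iov_pfvf_msg_length();
 */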
4652 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4654 struct ecore_vf_info *p_vf;
4656 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4657 if (!p_vf || !p_vf->bulletin.p_virt)
4660 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4663 return p_vf->bulletin.p_virt->mac;
4666 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4669 struct ecore_vf_info *p_vf;
4671 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4672 if (!p_vf || !p_vf->bulletin.p_virt)
4675 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4678 return p_vf->bulletin.p_virt->pvid;
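/* Illustrative sketch only: management code reporting a VF's forced
 * configuration. A NULL MAC pointer / zero pvid is taken here to mean that
 * nothing is currently forced for that VF.
 *
 *	u8 *p_mac = ecore_iov_bulletin_get_forced_mac(p_hwfn, rel_vf_id);
 *	u16 pvid = ecore_iov_bulletin_get_forced_vlan(p_hwfn, rel_vf_id);
 */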
4681 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4682 struct ecore_ptt *p_ptt,
4685 struct ecore_mcp_link_state *p_link;
4686 struct ecore_vf_info *vf;
4688 enum _ecore_status_t rc;
4690 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4695 rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4696 if (rc != ECORE_SUCCESS)
4699 p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
4701 return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
4705 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4706 struct ecore_ptt *p_ptt,
4708 struct ecore_eth_stats *p_stats)
4710 struct ecore_vf_info *vf;
4712 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4716 if (vf->state != VF_ENABLED)
4719 __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4720 vf->abs_vf_id + 0x10, false);
4722 return ECORE_SUCCESS;
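/* Illustrative sketch only: pulling one VF's vport statistics. The VF must
 * be in VF_ENABLED state for the helper above to fill the structure.
 *
 *	struct ecore_eth_stats stats;
 *
 *	OSAL_MEMSET(&stats, 0, sizeof(stats));
 *	if (ecore_iov_get_vf_stats(p_hwfn, p_ptt, vfid, &stats) ==
 *	    ECORE_SUCCESS)
 *		... stats now holds the VF's counters ...
 */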
4725 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4727 struct ecore_vf_info *p_vf;
4729 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4733 return p_vf->num_rxqs;
4736 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4738 struct ecore_vf_info *p_vf;
4740 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4744 return p_vf->num_active_rxqs;
4747 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4749 struct ecore_vf_info *p_vf;
4751 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4758 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4760 struct ecore_vf_info *p_vf;
4762 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4766 return p_vf->num_sbs;
4769 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4771 struct ecore_vf_info *p_vf;
4773 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4777 return (p_vf->state == VF_FREE);
4780 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
4783 struct ecore_vf_info *p_vf;
4785 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4789 return (p_vf->state == VF_ACQUIRED);
4792 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4794 struct ecore_vf_info *p_vf;
4796 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4800 return (p_vf->state == VF_ENABLED);
4803 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
4806 struct ecore_vf_info *p_vf;
4808 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4812 return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
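/* Illustrative sketch only: the boolean helpers above expose the VF states
 * (VF_FREE, VF_ACQUIRED, VF_ENABLED, VF_STOPPED) as simple queries, e.g. for
 * a hypothetical status dump:
 *
 *	if (ecore_iov_is_vf_wait_for_acquire(p_hwfn, rel_vf_id))
 *		... VF has not sent ACQUIRE yet ...
 *	else if (ecore_iov_is_vf_acquired_not_initialized(p_hwfn, rel_vf_id))
 *		... VF acquired resources but has no vport yet ...
 *	else if (ecore_iov_is_vf_initialized(p_hwfn, rel_vf_id))
 *		... VF is fully enabled ...
 */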
4815 enum _ecore_status_t
4816 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
4818 struct ecore_wfq_data *vf_vp_wfq;
4819 struct ecore_vf_info *vf_info;
4821 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4825 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4827 if (vf_vp_wfq->configured)
4828 return vf_vp_wfq->min_speed;
4833 #ifdef CONFIG_ECORE_SW_CHANNEL
4834 void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
4837 struct ecore_vf_info *vf_info;
4839 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4843 vf_info->b_hw_channel = b_is_hw;