 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_COALESCE_UPDATE",
	"CHANNEL_TLV_COALESCE_READ",
static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= ECORE_QCID_LEGACY_VF_CID;
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
		p_ramrod->personality = PERSONALITY_ETH;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid &&
		    !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
enum ecore_iov_validate_q_mode {
	ECORE_IOV_VALIDATE_Q_NA,
	ECORE_IOV_VALIDATE_Q_ENABLE,
	ECORE_IOV_VALIDATE_Q_DISABLE,

static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
					  enum ecore_iov_validate_q_mode mode,
	if (mode == ECORE_IOV_VALIDATE_Q_NA)

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct ecore_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (p_qcid->p_cid == OSAL_NULL)

		if (p_qcid->b_is_tx != b_is_tx)

		/* Found. It's enabled. */
		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);

	/* In case we haven't found any valid cid, it's disabled */
	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   enum ecore_iov_validate_q_mode mode)
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);

	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   enum ecore_iov_validate_q_mode mode)
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);

	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
/* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,

static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
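	/* Note: the CRC is computed over the bulletin contents excluding the
	 * CRC field itself, so the VF side can check version plus CRC to
	 * validate that it read a consistent snapshot.
	 */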
	p_bulletin->version++;
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
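	/* The DMAE transfer length below appears to be given in dwords,
	 * hence the bulletin.size / 4.
	 */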
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);

	/* @@@TODO - in future we might want to add an OSAL here to
	 * allow each OS to decide on its own how to act.
	 */
	DP_VERBOSE(p_dev, ECORE_MSG_IOV,
		   "Number of VFs is already set to a non-zero value."
		   " Ignoring PCI configuration value\n");

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d"
			  " setting num_vf to zero\n",

	return ECORE_SUCCESS;
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		       "ecore_iov_setup_vfdb called without allocating memory first\n");

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct ecore_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
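		/* Worked example (hypothetical values): a PF opaque_fid with
		 * low byte 0x01 and abs_vf_id 5 yield a VF opaque_fid of
		 * (0x01 | (5 << 8)) == 0x0501.
		 */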
		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");

	p_hwfn->pf_iov_info = p_sriov;

	return ecore_iov_allocate_vfdb(p_hwfn);
void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))

	ecore_iov_setup_vfdb(p_hwfn);

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
	OSAL_FREE(p_dev, p_dev->p_iov_info);
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when it's available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true,
			  "Can't support IOV due to lack of memory\n");
	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		return ECORE_SUCCESS;

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the
	 * latter, to differentiate between the two.
	 */
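	/* Worked example (hypothetical values): with ARI, offset 16 and
	 * abs_pf_id 2 satisfy 16 < (256 - 2), so first_vf_in_pf =
	 * 16 + 2 - 16 = 2. Without ARI, offset 258 and abs_pf_id 2 fall
	 * through to the else branch: 258 + 2 - 256 = 4.
	 */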
	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
	struct ecore_vf_info *vf;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);

		vf->to_disable = to_disable;

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
	if (!IS_ECORE_SRIOV(p_dev))

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
		rc = ECORE_UNKNOWN_ERROR;
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
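	/* Each 32-bit WAS_ERROR register covers 32 VFs. e.g. (hypothetical)
	 * abs_vfid 40 selects register 1 (vfid >> 5) and bit 8 (vfid & 0x1f).
	 */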
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->opaque_fid, true);
static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
static enum _ecore_status_t
ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
	/* If client overrides this, don't do anything */
	if (p_hwfn->p_dev->b_dont_override_vf_msix)
		return ECORE_SUCCESS;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_for_each_vf(p_hwfn, i) {
			struct ecore_vf_info *p_vf;

			p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);

			current_max = OSAL_MAX_T(u8, current_max,

	if (num_sbs > current_max)
		return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,

	return ECORE_SUCCESS;
static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc = ECORE_SUCCESS;

		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	/* It's possible VF was previously considered malicious */
	vf->b_malicious = false;
	rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					     vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
/**
 * @brief ecore_iov_config_perm_table - configure the permission
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
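		/* Illustration (hypothetical values): enabling abs VF 5
		 * writes 0x105 - the VF id in the low bits plus bit 8,
		 * presumably the entry's valid/enable bit.
		 */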
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
	struct ecore_igu_block *p_block;
	struct cau_sb_entry sb_entry;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		    (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = ecore_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~ECORE_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		ecore_wr(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY +
			 sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure in CAU the IGU SBs that were marked valid */
		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    p_block->igu_sb_id * sizeof(u64), 2, 0);

	vf->num_sbs = (u8)num_rx_queues;
/**
 * @brief The function invalidates all the VF entries;
 *        technically this isn't required, but added for
 *        cleanliness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;

		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);

	/* Perform sanity checking on the requested vport/rss */
	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
			  p_params->rel_vf_id, p_params->vport_id);

	if ((p_params->num_queues > 1) &&
	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
			  p_params->rel_vf_id, p_params->rss_eng_id);

	/* TODO - remove this once we get confidence of change */
	if (!p_params->vport_id) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
			  p_params->rel_vf_id);
	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
			  p_params->rel_vf_id);
	vf->vport_id = p_params->vport_id;
	vf->rss_eng_id = p_params->rss_eng_id;

	/* Since it's possible to relocate SBs, it's a bit difficult to check
	 * things here. Simply check whether the index falls in the range
	 * belonging to the PF.
	 */
	for (i = 0; i < p_params->num_queues; i++) {
		qid = p_params->req_rx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));

		qid = p_params->req_tx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);

	/* Update the link configuration in bulletin. */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
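		/* Mark the VF active: active_vfs is an array of u64 bitmaps,
		 * word index id / 64, bit index id % 64.
		 */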
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
		    (1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
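	/* Clear the VF's bit in the active bitmap, using the same word
	 * (id / 64) and bit (id % 64) addressing with which it was set in
	 * ecore_iov_init_hw_for_vf().
	 */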
	p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
	    ~(1ULL << (vf->relative_vf_id % 64));

	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->p_iov_info->num_vfs--;

	return ECORE_SUCCESS;
static bool ecore_iov_tlv_supported(u16 tlvtype)
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	if (ecore_iov_tlv_supported(tlv))
			   "VF[%d]: vf pf channel locked by %s\n",
			   ecore_channel_tlvs_string[tlv]);
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   ecore_channel_tlvs_string[expected_tlv]);
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the locking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
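/* Typical usage (sketch, mirroring ecore_iov_prepare_resp() below): chain
 * TLVs onto the reply buffer and then close the list, e.g.
 *
 *	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_ACQUIRE, sizeof(resp));
 *	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */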
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)

		/* Validate entry - protect against malicious VFs */
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");

		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
#ifdef CONFIG_ECORE_SW_CHANNEL
				    u16 OSAL_UNUSED length,
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel)

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

		 GTT_BAR0_MAP_REG_USDM_RAM +
		 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))

		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(i),

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(&mbx->offset, type, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      bool b_enabled_only)
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);

	return &vf->p_vf_info;
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)

			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = OSAL_NULL;

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	    OSAL_MIN_T(u8, p_req->num_cids,
		       p_hwfn->pf_params.eth_pf_params.num_vf_cons);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;

	return PFVF_STATUS_SUCCESS;
static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
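	/* No Tstorm per-queue statistics area is advertised to the VF;
	 * address/len are left zero.
	 */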
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	 */
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
				  "VF[%d] needs fastpath HSI %02x.%02x, which is"
				  " incompatible with loaded FW's fastpath"
				  req->vfdev_info.eth_fp_hsi_major,
				  req->vfdev_info.eth_fp_hsi_minor,
				  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->p_dev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
			  "VF[%d] is running an old driver that doesn't support"

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->p_dev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
		vfpf_status = PFVF_STATUS_FAILURE;

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  filter.vlan, p_vf->relative_vf_id);
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
static enum _ecore_status_t
ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf,
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
			  1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct ecore_queue_cid *p_cid = OSAL_NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
			if (p_cid == OSAL_NULL)

			rc = ecore_sp_eth_rx_queues_update(p_hwfn,
							   ECORE_SPQ_MODE_EBLOCK,
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"
					  " for queue[0x%04x]\n",
					  p_cid->rel.queue_id);

		p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
	struct ecore_sp_vport_start_params params = { 0 };
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_vf_info *vf_info;
	enum _ecore_status_t rc;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);

		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
				      start->sb_addr[sb_id],

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
		params.tx_switching = false;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
	if (rc != ECORE_SUCCESS) {
		    "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
					  vf->vport_id, vf->opaque_fid);
		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			       sizeof(struct pfvf_def_resp_tlv), status);
2101 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2102 struct ecore_ptt *p_ptt,
2103 struct ecore_vf_info *vf)
2105 u8 status = PFVF_STATUS_SUCCESS;
2106 enum _ecore_status_t rc;
2108 vf->vport_instance--;
2109 vf->spoof_chk = false;
2111 if ((ecore_iov_validate_active_rxq(vf)) ||
2112 (ecore_iov_validate_active_txq(vf))) {
2113 vf->b_malicious = true;
2114 DP_NOTICE(p_hwfn, false,
2115 "VF [%02x] - considered malicious;"
2116 " Unable to stop RX/TX queuess\n",
2118 status = PFVF_STATUS_MALICIOUS;
2122 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2123 if (rc != ECORE_SUCCESS) {
2125 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2126 status = PFVF_STATUS_FAILURE;
2129 /* Forget the configuration on the vport */
2130 vf->configured_features = 0;
2131 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2134 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2135 sizeof(struct pfvf_def_resp_tlv), status);
2138 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2139 struct ecore_ptt *p_ptt,
2140 struct ecore_vf_info *vf,
2141 u8 status, bool b_legacy)
2143 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2144 struct pfvf_start_queue_resp_tlv *p_tlv;
2145 struct vfpf_start_rxq_tlv *req;
2148 mbx->offset = (u8 *)mbx->reply_virt;
2150 /* Taking a bigger struct instead of adding a TLV to list was a
2151 * mistake, but one which we're now stuck with, as some older
2152 * clients assume the size of the previous response.
2155 length = sizeof(*p_tlv);
2157 length = sizeof(struct pfvf_def_resp_tlv);
2159 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2160 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2161 sizeof(struct channel_list_end_tlv));
2163 /* Update the TLV with the response */
2164 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2165 req = &mbx->req_virt->start_rxq;
2166 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2167 OFFSETOF(struct mstorm_vf_zone,
2168 non_trigger.eth_rx_queue_producers) +
2169 sizeof(struct eth_rx_prod_data) * req->rx_qid;
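/* In other words, the offset handed back to the VF is the base of the
 * Mstorm "zone B" window in the VF's BAR0, plus the producers array
 * inside struct mstorm_vf_zone, plus rx_qid entries of
 * struct eth_rx_prod_data - i.e., the rx_qid'th Rx producer slot.
 * The VF adds this offset to its BAR0 mapping when posting producers.
 */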
2172 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2175 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2176 struct ecore_vf_info *p_vf, bool b_is_tx)
2178 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2179 struct vfpf_qid_tlv *p_qid_tlv;
2181 /* Search for the qid the VF published, if it's going to provide it */
2182 if (!(p_vf->acquire.vfdev_info.capabilities &
2183 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2185 return ECORE_IOV_LEGACY_QID_TX;
2187 return ECORE_IOV_LEGACY_QID_RX;
2190 p_qid_tlv = (struct vfpf_qid_tlv *)
2191 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2193 if (p_qid_tlv == OSAL_NULL) {
2194 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2195 "VF[%2x]: Failed to provide qid\n",
2196 p_vf->relative_vf_id);
2198 return ECORE_IOV_QID_INVALID;
2201 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2202 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2203 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2204 p_vf->relative_vf_id, p_qid_tlv->qid);
2205 return ECORE_IOV_QID_INVALID;
2208 return p_qid_tlv->qid;
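/* To summarize the qid scheme: legacy VFs [no VFPF_ACQUIRE_CAP_QUEUE_QIDS]
 * get the fixed ECORE_IOV_LEGACY_QID_RX/TX slots, while newer VFs must
 * supply an explicit qid TLV which is validated against
 * MAX_QUEUES_PER_QZONE. The returned value indexes the cids[] array of
 * the VF's queue zone.
 */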
2211 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2212 struct ecore_ptt *p_ptt,
2213 struct ecore_vf_info *vf)
2215 struct ecore_queue_start_common_params params;
2216 struct ecore_queue_cid_vf_params vf_params;
2217 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2218 u8 status = PFVF_STATUS_NO_RESOURCE;
2219 u8 qid_usage_idx, vf_legacy = 0;
2220 struct ecore_vf_queue *p_queue;
2221 struct vfpf_start_rxq_tlv *req;
2222 struct ecore_queue_cid *p_cid;
2223 struct ecore_sb_info sb_dummy;
2224 enum _ecore_status_t rc;
2226 req = &mbx->req_virt->start_rxq;
2228 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2229 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2230 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2233 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2234 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2237 p_queue = &vf->vf_queues[req->rx_qid];
2238 if (p_queue->cids[qid_usage_idx].p_cid)
2241 vf_legacy = ecore_vf_calculate_legacy(vf);
2243 /* Acquire a new queue-cid */
2244 OSAL_MEMSET(&params, 0, sizeof(params));
2245 params.queue_id = (u8)p_queue->fw_rx_qid;
2246 params.vport_id = vf->vport_id;
2247 params.stats_id = vf->abs_vf_id + 0x10;
2249 /* Since IGU index is passed via sb_info, construct a dummy one */
2250 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2251 sb_dummy.igu_sb_id = req->hw_sb;
2252 params.p_sb = &sb_dummy;
2253 params.sb_idx = req->sb_index;
2255 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2256 vf_params.vfid = vf->relative_vf_id;
2257 vf_params.vf_qid = (u8)req->rx_qid;
2258 vf_params.vf_legacy = vf_legacy;
2259 vf_params.qid_usage_idx = qid_usage_idx;
2261 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2262 &params, true, &vf_params);
2263 if (p_cid == OSAL_NULL)
2266 /* Legacy VFs have their Producers in a different location, which they
2267 * calculate on their own and clean the producer prior to this.
2269 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2271 GTT_BAR0_MAP_REG_MSDM_RAM +
2272 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2275 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2280 if (rc != ECORE_SUCCESS) {
2281 status = PFVF_STATUS_FAILURE;
2282 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2284 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2285 p_queue->cids[qid_usage_idx].b_is_tx = false;
2286 status = PFVF_STATUS_SUCCESS;
2287 vf->num_active_rxqs++;
2291 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2293 ECORE_QCID_LEGACY_VF_RX_PROD));
2297 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2298 struct ecore_tunnel_info *p_tun,
2299 u16 tunn_feature_mask)
2301 p_resp->tunn_feature_mask = tunn_feature_mask;
2302 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2303 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2304 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2305 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2306 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2307 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2308 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2309 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2310 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2311 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2312 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2313 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
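/* Note that the response is built from the PF's current tunnel state
 * [p_tun] rather than echoing the VF's request; since the PF may modify
 * or reject parts of the request, this is what tells the VF which
 * modes, classifications and UDP ports actually took effect.
 */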
2317 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2318 struct ecore_tunn_update_type *p_tun,
2319 enum ecore_tunn_mode mask, u8 tun_cls)
2321 if (p_req->tun_mode_update_mask & (1 << mask)) {
2322 p_tun->b_update_mode = true;
2324 if (p_req->tunn_mode & (1 << mask))
2325 p_tun->b_mode_enabled = true;
2328 p_tun->tun_cls = tun_cls;
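/* Each tunnel type is thus controlled by two request bitmasks:
 * tun_mode_update_mask selects whether the mode should be touched at
 * all, while tunn_mode carries the new on/off value for the selected
 * types. The classification [tun_cls] is latched regardless.
 */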
2332 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2333 struct ecore_tunn_update_type *p_tun,
2334 struct ecore_tunn_update_udp_port *p_port,
2335 enum ecore_tunn_mode mask,
2336 u8 tun_cls, u8 update_port, u16 port)
2339 p_port->b_update_port = true;
2340 p_port->port = port;
2343 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2347 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2349 bool b_update_requested = false;
2351 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2352 p_req->update_geneve_port || p_req->update_vxlan_port)
2353 b_update_requested = true;
2355 return b_update_requested;
2358 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2359 struct ecore_ptt *p_ptt,
2360 struct ecore_vf_info *p_vf)
2362 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2363 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2364 struct pfvf_update_tunn_param_tlv *p_resp;
2365 struct vfpf_update_tunn_param_tlv *p_req;
2366 enum _ecore_status_t rc = ECORE_SUCCESS;
2367 u8 status = PFVF_STATUS_SUCCESS;
2368 bool b_update_required = false;
2369 struct ecore_tunnel_info tunn;
2370 u16 tunn_feature_mask = 0;
2373 mbx->offset = (u8 *)mbx->reply_virt;
2375 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2376 p_req = &mbx->req_virt->tunn_param_update;
2378 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2379 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2380 "No tunnel update requested by VF\n");
2381 status = PFVF_STATUS_FAILURE;
2385 tunn.b_update_rx_cls = p_req->update_tun_cls;
2386 tunn.b_update_tx_cls = p_req->update_tun_cls;
2388 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2389 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2390 p_req->update_vxlan_port,
2392 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2393 ECORE_MODE_L2GENEVE_TUNN,
2394 p_req->l2geneve_clss,
2395 p_req->update_geneve_port,
2396 p_req->geneve_port);
2397 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2398 ECORE_MODE_IPGENEVE_TUNN,
2399 p_req->ipgeneve_clss);
2400 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2401 ECORE_MODE_L2GRE_TUNN,
2403 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2404 ECORE_MODE_IPGRE_TUNN,
2407 /* If the PF modifies the VF's request, it should still
2408 * return an error for a partial or modified configuration,
2409 * as opposed to the requested one.
2411 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2412 &b_update_required, &tunn);
2414 if (rc != ECORE_SUCCESS)
2415 status = PFVF_STATUS_FAILURE;
2417 /* Check whether the ECORE client is willing to update anything */
2418 if (b_update_required) {
2421 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2422 ECORE_SPQ_MODE_EBLOCK,
2424 if (rc != ECORE_SUCCESS)
2425 status = PFVF_STATUS_FAILURE;
2427 geneve_port = p_tun->geneve_port.port;
2428 ecore_for_each_vf(p_hwfn, i) {
2429 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2430 p_tun->vxlan_port.port,
2436 p_resp = ecore_add_tlv(&mbx->offset,
2437 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2439 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2440 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2441 sizeof(struct channel_list_end_tlv));
2443 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2446 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2447 struct ecore_ptt *p_ptt,
2448 struct ecore_vf_info *p_vf,
2452 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2453 struct pfvf_start_queue_resp_tlv *p_tlv;
2454 bool b_legacy = false;
2457 mbx->offset = (u8 *)mbx->reply_virt;
2459 /* Taking a bigger struct instead of adding a TLV to list was a
2460 * mistake, but one which we're now stuck with, as some older
2461 * clients assume the size of the previous response.
2463 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2464 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2468 length = sizeof(*p_tlv);
2470 length = sizeof(struct pfvf_def_resp_tlv);
2472 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2473 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2474 sizeof(struct channel_list_end_tlv));
2476 /* Update the TLV with the response */
2477 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2478 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
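/* For non-legacy VFs the Tx doorbell offset is derived from the queue's
 * CID; the VF is expected to add it to its doorbell BAR mapping. Legacy
 * clients compute the doorbell address on their own, which is why the
 * offset is filled in only for the non-legacy case.
 */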
2480 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2483 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2484 struct ecore_ptt *p_ptt,
2485 struct ecore_vf_info *vf)
2487 struct ecore_queue_start_common_params params;
2488 struct ecore_queue_cid_vf_params vf_params;
2489 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2490 u8 status = PFVF_STATUS_NO_RESOURCE;
2491 struct ecore_vf_queue *p_queue;
2492 struct vfpf_start_txq_tlv *req;
2493 struct ecore_queue_cid *p_cid;
2494 struct ecore_sb_info sb_dummy;
2495 u8 qid_usage_idx, vf_legacy;
2497 enum _ecore_status_t rc;
2500 OSAL_MEMSET(&params, 0, sizeof(params));
2501 req = &mbx->req_virt->start_txq;
2503 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2504 ECORE_IOV_VALIDATE_Q_NA) ||
2505 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2508 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2509 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2512 p_queue = &vf->vf_queues[req->tx_qid];
2513 if (p_queue->cids[qid_usage_idx].p_cid)
2516 vf_legacy = ecore_vf_calculate_legacy(vf);
2518 /* Acquire a new queue-cid */
2519 params.queue_id = p_queue->fw_tx_qid;
2520 params.vport_id = vf->vport_id;
2521 params.stats_id = vf->abs_vf_id + 0x10;
2523 /* Since IGU index is passed via sb_info, construct a dummy one */
2524 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2525 sb_dummy.igu_sb_id = req->hw_sb;
2526 params.p_sb = &sb_dummy;
2527 params.sb_idx = req->sb_index;
2529 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2530 vf_params.vfid = vf->relative_vf_id;
2531 vf_params.vf_qid = (u8)req->tx_qid;
2532 vf_params.vf_legacy = vf_legacy;
2533 vf_params.qid_usage_idx = qid_usage_idx;
2535 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2536 &params, false, &vf_params);
2537 if (p_cid == OSAL_NULL)
2540 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2541 vf->relative_vf_id);
2542 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2543 req->pbl_addr, req->pbl_size, pq);
2544 if (rc != ECORE_SUCCESS) {
2545 status = PFVF_STATUS_FAILURE;
2546 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2548 status = PFVF_STATUS_SUCCESS;
2549 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2550 p_queue->cids[qid_usage_idx].b_is_tx = true;
2555 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2559 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2560 struct ecore_vf_info *vf,
2563 bool cqe_completion)
2565 struct ecore_vf_queue *p_queue;
2566 enum _ecore_status_t rc = ECORE_SUCCESS;
2568 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2569 ECORE_IOV_VALIDATE_Q_NA)) {
2570 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2571 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2572 vf->relative_vf_id, rxq_id, qid_usage_idx);
2576 p_queue = &vf->vf_queues[rxq_id];
2578 /* We've validated the index and the existence of the active RXQ -
2579 * now we need to make sure that it's using the correct qid.
2581 if (!p_queue->cids[qid_usage_idx].p_cid ||
2582 p_queue->cids[qid_usage_idx].b_is_tx) {
2583 struct ecore_queue_cid *p_cid;
2585 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2586 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2587 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2588 vf->relative_vf_id, rxq_id, qid_usage_idx,
2589 rxq_id, p_cid->qid_usage_idx);
2593 /* Now that we know we have a valid Rx-queue - close it */
2594 rc = ecore_eth_rx_queue_stop(p_hwfn,
2595 p_queue->cids[qid_usage_idx].p_cid,
2596 false, cqe_completion);
2597 if (rc != ECORE_SUCCESS)
2600 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2601 vf->num_active_rxqs--;
2603 return ECORE_SUCCESS;
2606 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2607 struct ecore_vf_info *vf,
2611 struct ecore_vf_queue *p_queue;
2612 enum _ecore_status_t rc = ECORE_SUCCESS;
2614 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2615 ECORE_IOV_VALIDATE_Q_NA))
2618 p_queue = &vf->vf_queues[txq_id];
2619 if (!p_queue->cids[qid_usage_idx].p_cid ||
2620 !p_queue->cids[qid_usage_idx].b_is_tx)
2623 rc = ecore_eth_tx_queue_stop(p_hwfn,
2624 p_queue->cids[qid_usage_idx].p_cid);
2625 if (rc != ECORE_SUCCESS)
2628 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2629 return ECORE_SUCCESS;
2632 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2633 struct ecore_ptt *p_ptt,
2634 struct ecore_vf_info *vf)
2636 u16 length = sizeof(struct pfvf_def_resp_tlv);
2637 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2638 u8 status = PFVF_STATUS_FAILURE;
2639 struct vfpf_stop_rxqs_tlv *req;
2641 enum _ecore_status_t rc;
2643 /* Starting with CHANNEL_TLV_QID, 'num_rxqs' is assumed to
2644 * be one. Since no older ecore passed multiple queues
2645 * using this API, sanitize the value.
2647 req = &mbx->req_virt->stop_rxqs;
2648 if (req->num_rxqs != 1) {
2649 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2650 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2651 vf->relative_vf_id);
2652 status = PFVF_STATUS_NOT_SUPPORTED;
2656 /* Find which qid-index is associated with the queue */
2657 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2658 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2661 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2662 qid_usage_idx, req->cqe_completion);
2663 if (rc == ECORE_SUCCESS)
2664 status = PFVF_STATUS_SUCCESS;
2666 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2670 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2671 struct ecore_ptt *p_ptt,
2672 struct ecore_vf_info *vf)
2674 u16 length = sizeof(struct pfvf_def_resp_tlv);
2675 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2676 u8 status = PFVF_STATUS_FAILURE;
2677 struct vfpf_stop_txqs_tlv *req;
2679 enum _ecore_status_t rc;
2681 /* Starting with CHANNEL_TLV_QID, 'num_txqs' is assumed to
2682 * be one. Since no older ecore passed multiple queues
2683 * using this API, sanitize the value.
2685 req = &mbx->req_virt->stop_txqs;
2686 if (req->num_txqs != 1) {
2687 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2688 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2689 vf->relative_vf_id);
2690 status = PFVF_STATUS_NOT_SUPPORTED;
2694 /* Find which qid-index is associated with the queue */
2695 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2696 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2699 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2701 if (rc == ECORE_SUCCESS)
2702 status = PFVF_STATUS_SUCCESS;
2705 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2709 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2710 struct ecore_ptt *p_ptt,
2711 struct ecore_vf_info *vf)
2713 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2714 u16 length = sizeof(struct pfvf_def_resp_tlv);
2715 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2716 struct vfpf_update_rxq_tlv *req;
2717 u8 status = PFVF_STATUS_FAILURE;
2718 u8 complete_event_flg;
2719 u8 complete_cqe_flg;
2721 enum _ecore_status_t rc;
2724 req = &mbx->req_virt->update_rxq;
2725 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2726 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2728 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2729 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2732 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2733 * expecting a single queue at a time. Validate this.
2735 if ((vf->acquire.vfdev_info.capabilities &
2736 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2737 req->num_rxqs != 1) {
2738 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2739 "VF[%d] supports QIDs but sends multiple queues\n",
2740 vf->relative_vf_id);
2744 /* Validate inputs - for the legacy case this is still true since
2745 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2747 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2748 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2749 ECORE_IOV_VALIDATE_Q_NA) ||
2750 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2751 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2752 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2753 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2754 vf->relative_vf_id, req->rx_qid,
2760 for (i = 0; i < req->num_rxqs; i++) {
2761 u16 qid = req->rx_qid + i;
2763 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2766 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2770 ECORE_SPQ_MODE_EBLOCK,
2772 if (rc != ECORE_SUCCESS)
2775 status = PFVF_STATUS_SUCCESS;
2777 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2781 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2782 void *p_tlvs_list, u16 req_type)
2784 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2788 if (!p_tlv->length) {
2789 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2793 if (p_tlv->type == req_type) {
2794 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2795 "Extended tlv type %s, length %d found\n",
2796 ecore_channel_tlvs_string[p_tlv->type],
2801 len += p_tlv->length;
2802 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2804 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2805 DP_NOTICE(p_hwfn, true,
2806 "TLVs has overrun the buffer size\n");
2809 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
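/* A channel request buffer is a packed TLV sequence, roughly:
 *   [ first_tlv | ext tlv | ext tlv | ... | CHANNEL_TLV_LIST_END ]
 * Every header carries its own length, so the walk above simply
 * advances by 'length' until it reaches LIST_END, bailing out on
 * zero-length entries or on overrunning TLV_BUFFER_SIZE.
 */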
2815 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2816 struct ecore_sp_vport_update_params *p_data,
2817 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2819 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2820 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2822 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2823 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2827 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2828 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2829 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2830 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2831 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
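/* All the ecore_iov_vp_update_*_param() parsers below follow the same
 * pattern: if their extended TLV is present in the request, they copy
 * its fields into the vport-update ramrod params and set their bit in
 * tlvs_mask, so the response can report which TLVs the PF actually
 * saw - and, separately, accepted.
 */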
2835 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2836 struct ecore_sp_vport_update_params *p_data,
2837 struct ecore_vf_info *p_vf,
2838 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2840 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2841 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2843 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2844 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2848 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2850 /* Ignore the VF request if we're forcing a vlan */
2851 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2852 p_data->update_inner_vlan_removal_flg = 1;
2853 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2856 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2860 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2861 struct ecore_sp_vport_update_params *p_data,
2862 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2864 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2865 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2867 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2868 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2869 if (!p_tx_switch_tlv)
2873 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2874 DP_NOTICE(p_hwfn, false,
2875 "FPGA: Ignore tx-switching configuration originating"
2881 p_data->update_tx_switching_flg = 1;
2882 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2883 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2887 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2888 struct ecore_sp_vport_update_params *p_data,
2889 struct ecore_iov_vf_mbx *p_mbx,
2892 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2893 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2895 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2896 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2900 p_data->update_approx_mcast_flg = 1;
2901 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2902 sizeof(unsigned long) *
2903 ETH_MULTICAST_MAC_BINS_IN_REGS);
2904 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2908 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2909 struct ecore_sp_vport_update_params *p_data,
2910 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2912 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2913 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2914 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2916 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2917 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2921 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2922 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2923 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2924 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2925 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2929 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2930 struct ecore_sp_vport_update_params *p_data,
2931 struct ecore_iov_vf_mbx *p_mbx,
2934 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2935 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2937 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2938 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2939 if (!p_accept_any_vlan)
2942 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2943 p_data->update_accept_any_vlan_flg =
2944 p_accept_any_vlan->update_accept_any_vlan_flg;
2945 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2949 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2950 struct ecore_vf_info *vf,
2951 struct ecore_sp_vport_update_params *p_data,
2952 struct ecore_rss_params *p_rss,
2953 struct ecore_iov_vf_mbx *p_mbx,
2954 u16 *tlvs_mask, u16 *tlvs_accepted)
2956 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2957 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2958 bool b_reject = false;
2962 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2963 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2965 p_data->rss_params = OSAL_NULL;
2969 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2971 p_rss->update_rss_config =
2972 !!(p_rss_tlv->update_rss_flags &
2973 VFPF_UPDATE_RSS_CONFIG_FLAG);
2974 p_rss->update_rss_capabilities =
2975 !!(p_rss_tlv->update_rss_flags &
2976 VFPF_UPDATE_RSS_CAPS_FLAG);
2977 p_rss->update_rss_ind_table =
2978 !!(p_rss_tlv->update_rss_flags &
2979 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2980 p_rss->update_rss_key =
2981 !!(p_rss_tlv->update_rss_flags &
2982 VFPF_UPDATE_RSS_KEY_FLAG);
2984 p_rss->rss_enable = p_rss_tlv->rss_enable;
2985 p_rss->rss_eng_id = vf->rss_eng_id;
2986 p_rss->rss_caps = p_rss_tlv->rss_caps;
2987 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2988 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2989 sizeof(p_rss->rss_key));
2991 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2992 (1 << p_rss_tlv->rss_table_size_log));
2994 for (i = 0; i < table_size; i++) {
2995 struct ecore_queue_cid *p_cid;
2997 q_idx = p_rss_tlv->rss_ind_table[i];
2998 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
2999 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3000 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3001 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3002 vf->relative_vf_id, q_idx);
3007 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3008 p_rss->rss_ind_table[i] = p_cid;
3011 p_data->rss_params = p_rss;
3013 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3015 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3019 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3020 struct ecore_sp_vport_update_params *p_data,
3021 struct ecore_sge_tpa_params *p_sge_tpa,
3022 struct ecore_iov_vf_mbx *p_mbx,
3025 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3026 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3028 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3029 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3031 if (!p_sge_tpa_tlv) {
3032 p_data->sge_tpa_params = OSAL_NULL;
3036 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3038 p_sge_tpa->update_tpa_en_flg =
3039 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3040 p_sge_tpa->update_tpa_param_flg =
3041 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3042 VFPF_UPDATE_TPA_PARAM_FLAG);
3044 p_sge_tpa->tpa_ipv4_en_flg =
3045 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3046 p_sge_tpa->tpa_ipv6_en_flg =
3047 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3048 p_sge_tpa->tpa_pkt_split_flg =
3049 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3050 p_sge_tpa->tpa_hdr_data_split_flg =
3051 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3052 p_sge_tpa->tpa_gro_consistent_flg =
3053 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3055 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3056 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3057 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3058 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3059 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3061 p_data->sge_tpa_params = p_sge_tpa;
3063 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3066 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3067 struct ecore_ptt *p_ptt,
3068 struct ecore_vf_info *vf)
3070 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3071 struct ecore_sp_vport_update_params params;
3072 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3073 struct ecore_sge_tpa_params sge_tpa_params;
3074 u16 tlvs_mask = 0, tlvs_accepted = 0;
3075 u8 status = PFVF_STATUS_SUCCESS;
3077 enum _ecore_status_t rc;
3079 /* Validate the PF can send such a request */
3080 if (!vf->vport_instance) {
3081 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3082 "No VPORT instance available for VF[%d],"
3083 " failing vport update\n",
3085 status = PFVF_STATUS_FAILURE;
3089 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3090 if (p_rss_params == OSAL_NULL) {
3091 status = PFVF_STATUS_FAILURE;
3095 OSAL_MEMSET(&params, 0, sizeof(params));
3096 params.opaque_fid = vf->opaque_fid;
3097 params.vport_id = vf->vport_id;
3098 params.rss_params = OSAL_NULL;
3100 /* Search for extended tlvs list and update values
3101 * from VF in struct ecore_sp_vport_update_params.
3103 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3104 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3105 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3106 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3107 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3108 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3109 ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
3110 &sge_tpa_params, mbx, &tlvs_mask);
3112 tlvs_accepted = tlvs_mask;
3114 /* Some of the extended TLVs need to be validated first; in that case,
3115 * they can update the mask without updating the accepted mask [so that
3116 * the PF can communicate to the VF that it has rejected the request].
3118 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3119 mbx, &tlvs_mask, &tlvs_accepted);
3121 /* Just log a message if no extended TLV is present in the buffer.
3122 * Once the VF requests every feature of the vport-update ramrod
3123 * as extended TLVs in the buffer, an error can be returned in the
3124 * response when no extended TLV is present.
3126 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3127 &params, &tlvs_accepted) !=
3130 status = PFVF_STATUS_NOT_SUPPORTED;
3134 if (!tlvs_accepted) {
3136 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3137 "Upper-layer prevents said VF"
3138 " configuration\n");
3140 DP_NOTICE(p_hwfn, true,
3141 "No feature tlvs found for vport update\n");
3142 status = PFVF_STATUS_NOT_SUPPORTED;
3146 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3150 status = PFVF_STATUS_FAILURE;
3153 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3154 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3155 tlvs_mask, tlvs_accepted);
3156 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3159 static enum _ecore_status_t
3160 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3161 struct ecore_vf_info *p_vf,
3162 struct ecore_filter_ucast *p_params)
3166 /* First remove entries and then add new ones */
3167 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3168 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3169 if (p_vf->shadow_config.vlans[i].used &&
3170 p_vf->shadow_config.vlans[i].vid ==
3172 p_vf->shadow_config.vlans[i].used = false;
3175 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3176 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3177 "VF [%d] - Tries to remove a non-existing"
3179 p_vf->relative_vf_id);
3182 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3183 p_params->opcode == ECORE_FILTER_FLUSH) {
3184 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3185 p_vf->shadow_config.vlans[i].used = false;
3188 /* In forced mode, we're willing to remove entries - but we don't add
3191 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3192 return ECORE_SUCCESS;
3194 if (p_params->opcode == ECORE_FILTER_ADD ||
3195 p_params->opcode == ECORE_FILTER_REPLACE) {
3196 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3197 if (p_vf->shadow_config.vlans[i].used)
3200 p_vf->shadow_config.vlans[i].used = true;
3201 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3205 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3206 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3207 "VF [%d] - Tries to configure more than %d"
3209 p_vf->relative_vf_id,
3210 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3215 return ECORE_SUCCESS;
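/* The shadow configuration tracks the filters the VF asked for, as
 * opposed to what is currently applied; it is what lets the PF re-play
 * the VF's unicast configuration once a forced VLAN/MAC is later
 * removed [see ecore_iov_reconfigure_unicast_shadow() usage above].
 */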
3218 static enum _ecore_status_t
3219 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3220 struct ecore_vf_info *p_vf,
3221 struct ecore_filter_ucast *p_params)
3223 char empty_mac[ETH_ALEN];
3226 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3228 /* If we're in forced-mode, we don't allow any change */
3229 /* TODO - this would change if we were ever to implement logic for
3230 * removing a forced MAC altogether [in which case, like for vlans,
3231 * we should be able to re-trace previous configuration.
3233 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3234 return ECORE_SUCCESS;
3236 /* First remove entries and then add new ones */
3237 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3238 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3239 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3240 p_params->mac, ETH_ALEN)) {
3241 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3247 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3248 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3249 "MAC isn't configured\n");
3252 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3253 p_params->opcode == ECORE_FILTER_FLUSH) {
3254 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3255 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3258 /* List the new MAC address */
3259 if (p_params->opcode != ECORE_FILTER_ADD &&
3260 p_params->opcode != ECORE_FILTER_REPLACE)
3261 return ECORE_SUCCESS;
3263 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3264 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3265 empty_mac, ETH_ALEN)) {
3266 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3267 p_params->mac, ETH_ALEN);
3268 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3269 "Added MAC at %d entry in shadow\n", i);
3274 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3275 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3276 "No available place for MAC\n");
3280 return ECORE_SUCCESS;
3283 static enum _ecore_status_t
3284 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3285 struct ecore_vf_info *p_vf,
3286 struct ecore_filter_ucast *p_params)
3288 enum _ecore_status_t rc = ECORE_SUCCESS;
3290 if (p_params->type == ECORE_FILTER_MAC) {
3291 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3292 if (rc != ECORE_SUCCESS)
3296 if (p_params->type == ECORE_FILTER_VLAN)
3297 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3302 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3303 struct ecore_ptt *p_ptt,
3304 struct ecore_vf_info *vf)
3306 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3307 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3308 struct vfpf_ucast_filter_tlv *req;
3309 u8 status = PFVF_STATUS_SUCCESS;
3310 struct ecore_filter_ucast params;
3311 enum _ecore_status_t rc;
3313 /* Prepare the unicast filter params */
3314 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3315 req = &mbx->req_virt->ucast_filter;
3316 params.opcode = (enum ecore_filter_opcode)req->opcode;
3317 params.type = (enum ecore_filter_ucast_type)req->type;
3319 /* @@@TBD - We might need logic on HV side in determining this */
3320 params.is_rx_filter = 1;
3321 params.is_tx_filter = 1;
3322 params.vport_to_remove_from = vf->vport_id;
3323 params.vport_to_add_to = vf->vport_id;
3324 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3325 params.vlan = req->vlan;
3327 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3328 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3329 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3330 vf->abs_vf_id, params.opcode, params.type,
3331 params.is_rx_filter ? "RX" : "",
3332 params.is_tx_filter ? "TX" : "",
3333 params.vport_to_add_to,
3334 params.mac[0], params.mac[1], params.mac[2],
3335 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3337 if (!vf->vport_instance) {
3338 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3339 "No VPORT instance available for VF[%d],"
3340 " failing ucast MAC configuration\n",
3342 status = PFVF_STATUS_FAILURE;
3346 /* Update shadow copy of the VF configuration */
3347 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3349 status = PFVF_STATUS_FAILURE;
3353 /* Determine whether the unicast filtering is acceptable to the PF */
3354 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3355 (params.type == ECORE_FILTER_VLAN ||
3356 params.type == ECORE_FILTER_MAC_VLAN)) {
3357 /* Once VLAN is forced or PVID is set, do not allow
3358 * to add/replace any further VLANs.
3360 if (params.opcode == ECORE_FILTER_ADD ||
3361 params.opcode == ECORE_FILTER_REPLACE)
3362 status = PFVF_STATUS_FORCED;
3366 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3367 (params.type == ECORE_FILTER_MAC ||
3368 params.type == ECORE_FILTER_MAC_VLAN)) {
3369 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3370 (params.opcode != ECORE_FILTER_ADD &&
3371 params.opcode != ECORE_FILTER_REPLACE))
3372 status = PFVF_STATUS_FORCED;
3376 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3377 if (rc == ECORE_EXISTS) {
3379 } else if (rc == ECORE_INVAL) {
3380 status = PFVF_STATUS_FAILURE;
3384 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3385 ECORE_SPQ_MODE_CB, OSAL_NULL);
3387 status = PFVF_STATUS_FAILURE;
3390 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3391 sizeof(struct pfvf_def_resp_tlv), status);
3394 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3395 struct ecore_ptt *p_ptt,
3396 struct ecore_vf_info *vf)
3401 for (i = 0; i < vf->num_sbs; i++)
3402 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3404 vf->opaque_fid, false);
3406 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3407 sizeof(struct pfvf_def_resp_tlv),
3408 PFVF_STATUS_SUCCESS);
3411 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3412 struct ecore_ptt *p_ptt,
3413 struct ecore_vf_info *vf)
3415 u16 length = sizeof(struct pfvf_def_resp_tlv);
3416 u8 status = PFVF_STATUS_SUCCESS;
3418 /* Disable Interrupts for VF */
3419 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3421 /* Reset Permission table */
3422 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3424 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3428 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3429 struct ecore_ptt *p_ptt,
3430 struct ecore_vf_info *p_vf)
3432 u16 length = sizeof(struct pfvf_def_resp_tlv);
3433 u8 status = PFVF_STATUS_SUCCESS;
3434 enum _ecore_status_t rc = ECORE_SUCCESS;
3436 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3438 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3439 /* Stopping the VF */
3440 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3443 if (rc != ECORE_SUCCESS) {
3444 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3446 status = PFVF_STATUS_FAILURE;
3449 p_vf->state = VF_STOPPED;
3452 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3456 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3457 struct ecore_ptt *p_ptt,
3458 struct ecore_vf_info *p_vf)
3460 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3461 struct pfvf_read_coal_resp_tlv *p_resp;
3462 struct vfpf_read_coal_req_tlv *req;
3463 u8 status = PFVF_STATUS_FAILURE;
3464 struct ecore_vf_queue *p_queue;
3465 struct ecore_queue_cid *p_cid;
3466 enum _ecore_status_t rc = ECORE_SUCCESS;
3467 u16 coal = 0, qid, i;
3470 mbx->offset = (u8 *)mbx->reply_virt;
3471 req = &mbx->req_virt->read_coal_req;
3474 b_is_rx = !!req->is_rx;
3477 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3478 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3479 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3480 "VF[%d]: Invalid Rx queue_id = %d\n",
3481 p_vf->abs_vf_id, qid);
3485 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3486 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3487 if (rc != ECORE_SUCCESS)
3490 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3491 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3492 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3493 "VF[%d]: Invalid Tx queue_id = %d\n",
3494 p_vf->abs_vf_id, qid);
3497 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3498 p_queue = &p_vf->vf_queues[qid];
3499 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3500 (!p_queue->cids[i].b_is_tx))
3503 p_cid = p_queue->cids[i].p_cid;
3505 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3507 if (rc != ECORE_SUCCESS)
3513 status = PFVF_STATUS_SUCCESS;
3516 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3518 p_resp->coal = coal;
3520 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3521 sizeof(struct channel_list_end_tlv));
3523 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3526 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3527 struct ecore_ptt *p_ptt,
3528 struct ecore_vf_info *vf)
3530 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3531 enum _ecore_status_t rc = ECORE_SUCCESS;
3532 struct vfpf_update_coalesce *req;
3533 u8 status = PFVF_STATUS_FAILURE;
3534 struct ecore_queue_cid *p_cid;
3535 u16 rx_coal, tx_coal;
3539 req = &mbx->req_virt->update_coalesce;
3541 rx_coal = req->rx_coal;
3542 tx_coal = req->tx_coal;
3545 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3546 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3548 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3549 vf->abs_vf_id, qid);
3553 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3554 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3556 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3557 vf->abs_vf_id, qid);
3561 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3562 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3563 vf->abs_vf_id, rx_coal, tx_coal, qid);
3566 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3568 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3569 if (rc != ECORE_SUCCESS) {
3570 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3571 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3572 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3575 vf->rx_coal = rx_coal;
3578 /* TODO - in future, it might be possible to pass this in a per-cid
3579 * granularity. For now, do this for all Tx queues.
3582 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3584 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3585 if (p_queue->cids[i].p_cid == OSAL_NULL)
3588 if (!p_queue->cids[i].b_is_tx)
3591 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3592 p_queue->cids[i].p_cid);
3593 if (rc != ECORE_SUCCESS) {
3594 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3595 "VF[%d]: Unable to set tx queue coalesce\n",
3600 vf->tx_coal = tx_coal;
3603 status = PFVF_STATUS_SUCCESS;
3605 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3606 sizeof(struct pfvf_def_resp_tlv), status);
3609 enum _ecore_status_t
3610 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3611 u16 rx_coal, u16 tx_coal,
3614 struct ecore_queue_cid *p_cid;
3615 struct ecore_vf_info *vf;
3616 struct ecore_ptt *p_ptt;
3619 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3620 DP_NOTICE(p_hwfn, true,
3621 "VF[%d] - Can not set coalescing: VF is not active\n",
3626 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3627 p_ptt = ecore_ptt_acquire(p_hwfn);
3631 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3632 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3634 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3635 vf->abs_vf_id, qid);
3639 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3640 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3642 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3643 vf->abs_vf_id, qid);
3647 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3648 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3649 vf->abs_vf_id, rx_coal, tx_coal, qid);
3652 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3654 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3655 if (rc != ECORE_SUCCESS) {
3656 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3657 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3658 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3661 vf->rx_coal = rx_coal;
3664 /* TODO - in future, it might be possible to pass this in a per-cid
3665 * granularity. For now, do this for all Tx queues.
3668 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3670 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3671 if (p_queue->cids[i].p_cid == OSAL_NULL)
3674 if (!p_queue->cids[i].b_is_tx)
3677 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3678 p_queue->cids[i].p_cid);
3679 if (rc != ECORE_SUCCESS) {
3680 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3681 "VF[%d]: Unable to set tx queue coalesce\n",
3686 vf->tx_coal = tx_coal;
3690 ecore_ptt_release(p_hwfn, p_ptt);
3695 static enum _ecore_status_t
3696 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3697 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3702 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3704 for (cnt = 0; cnt < 50; cnt++) {
3705 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3710 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3714 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3715 p_vf->abs_vf_id, val);
3716 return ECORE_TIMEOUT;
3719 return ECORE_SUCCESS;
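/* The DORQ poll above works by pretending to be the VF: while the
 * pretend is in effect, DORQ_REG_VF_USAGE_CNT reflects that VF's
 * outstanding doorbells, so FLR cleanup may proceed only once the
 * usage count drains [or we give up after 50 iterations].
 */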
3722 static enum _ecore_status_t
3723 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3724 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3726 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3729 /* Read initial consumers & producers */
3730 for (i = 0; i < MAX_NUM_VOQS; i++) {
3733 cons[i] = ecore_rd(p_hwfn, p_ptt,
3734 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3736 prod = ecore_rd(p_hwfn, p_ptt,
3737 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3739 distance[i] = prod - cons[i];
3742 /* Wait for consumers to pass the producers */
3744 for (cnt = 0; cnt < 50; cnt++) {
3745 for (; i < MAX_NUM_VOQS; i++) {
3748 tmp = ecore_rd(p_hwfn, p_ptt,
3749 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3751 if (distance[i] > tmp - cons[i])
3755 if (i == MAX_NUM_VOQS)
3762 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3763 p_vf->abs_vf_id, i);
3764 return ECORE_TIMEOUT;
3767 return ECORE_SUCCESS;
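/* The PBF poll uses a producer/consumer distance trick: it snapshots
 * prod - cons per VOQ, then waits until every consumer has advanced by
 * at least that distance. Once it has, all blocks that were in flight
 * at snapshot time must have drained, regardless of new traffic from
 * other functions.
 */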
3770 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3771 struct ecore_vf_info *p_vf,
3772 struct ecore_ptt *p_ptt)
3774 enum _ecore_status_t rc;
3776 /* TODO - add SRC and TM polling once we add storage IOV */
3778 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3782 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3786 return ECORE_SUCCESS;
3789 static enum _ecore_status_t
3790 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3791 struct ecore_ptt *p_ptt,
3792 u16 rel_vf_id, u32 *ack_vfs)
3794 struct ecore_vf_info *p_vf;
3795 enum _ecore_status_t rc = ECORE_SUCCESS;
3797 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3799 return ECORE_SUCCESS;
3801 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3802 (1ULL << (rel_vf_id % 64))) {
3803 u16 vfid = p_vf->abs_vf_id;
3805 /* TODO - should we lock channel? */
3807 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3808 "VF[%d] - Handling FLR\n", vfid);
3810 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3812 /* If VF isn't active, no need for anything but SW */
3816 /* TODO - what to do in case of failure? */
3817 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3818 if (rc != ECORE_SUCCESS)
3821 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3823 /* TODO - what now? What a mess... */
3824 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3828 /* Workaround to make VF-PF channel ready, as FW
3829 * doesn't do that as a part of FLR.
3832 GTT_BAR0_MAP_REG_USDM_RAM +
3833 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3835 /* VF_STOPPED has to be set only after final cleanup
3836 * but prior to re-enabling the VF.
3838 p_vf->state = VF_STOPPED;
3840 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3842 /* TODO - again, a mess... */
3843 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3848 /* Mark VF for ack and clean pending state */
3849 if (p_vf->state == VF_RESET)
3850 p_vf->state = VF_STOPPED;
3851 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3852 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3853 ~(1ULL << (rel_vf_id % 64));
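/* Note the two bitmap layouts in play here: pending_flr is an array of
 * u64 words indexed by the relative VF id [word rel_vf_id / 64, bit
 * rel_vf_id % 64], while the MFW ack bitmap uses u32 words keyed by
 * the absolute VF id [word vfid / 32, bit vfid % 32].
 */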
3854 p_vf->vf_mbx.b_pending_msg = false;
3860 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3861 struct ecore_ptt *p_ptt)
3863 u32 ack_vfs[VF_MAX_STATIC / 32];
3864 enum _ecore_status_t rc = ECORE_SUCCESS;
3867 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3869 /* Since BRB <-> PRS interface can't be tested as part of the flr
3870 * polling due to HW limitations, simply sleep a bit. And since
3871 * there's no need to wait per-vf, do it before looping.
3875 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3876 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3878 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3882 enum _ecore_status_t
3883 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3884 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3886 u32 ack_vfs[VF_MAX_STATIC / 32];
3887 enum _ecore_status_t rc = ECORE_SUCCESS;
3889 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3891 /* Wait instead of polling the BRB <-> PRS interface */
3894 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3896 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3900 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3905 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3906 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3907 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3908 "[%08x,...,%08x]: %08x\n",
3909 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3911 if (!p_hwfn->p_dev->p_iov_info) {
3912 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3917 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3918 struct ecore_vf_info *p_vf;
3921 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3925 vfid = p_vf->abs_vf_id;
3926 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3927 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3928 u16 rel_vf_id = p_vf->relative_vf_id;
3930 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3931 "VF[%d] [rel %d] got FLR-ed\n",
3934 p_vf->state = VF_RESET;
3936 /* No need to lock here, since pending_flr should
3937 * only change here and before ACKing the MFW. Since
3938 * the MFW will not trigger an additional attention for
3939 * VF FLR until we ACK, we're safe.
3941 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3949 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3951 struct ecore_mcp_link_params *p_params,
3952 struct ecore_mcp_link_state *p_link,
3953 struct ecore_mcp_link_capabilities *p_caps)
3955 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3956 struct ecore_bulletin_content *p_bulletin;
3961 p_bulletin = p_vf->bulletin.p_virt;
3964 __ecore_vf_get_link_params(p_params, p_bulletin);
3966 __ecore_vf_get_link_state(p_link, p_bulletin);
3968 __ecore_vf_get_link_caps(p_caps, p_bulletin);
3971 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3972 struct ecore_ptt *p_ptt, int vfid)
3974 struct ecore_iov_vf_mbx *mbx;
3975 struct ecore_vf_info *p_vf;
3977 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3981 mbx = &p_vf->vf_mbx;
3983 /* ecore_iov_process_mbx_request */
3984 #ifndef CONFIG_ECORE_SW_CHANNEL
3985 if (!mbx->b_pending_msg) {
3986 DP_NOTICE(p_hwfn, true,
3987 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3991 mbx->b_pending_msg = false;
3994 mbx->first_tlv = mbx->req_virt->first_tlv;
3996 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3997 "VF[%02x]: Processing mailbox message [type %04x]\n",
3998 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4000 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
4001 p_vf->relative_vf_id,
4002 mbx->first_tlv.tl.type);
4004 /* Lock the per-VF op mutex and note the locker's identity.
4005 * The unlock will take place in the mbx response.
4007 ecore_iov_lock_vf_pf_channel(p_hwfn,
4008 p_vf, mbx->first_tlv.tl.type);
4010 /* check if tlv type is known */
4011 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
4012 !p_vf->b_malicious) {
4013 /* switch on the opcode */
4014 switch (mbx->first_tlv.tl.type) {
4015 case CHANNEL_TLV_ACQUIRE:
4016 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
4018 case CHANNEL_TLV_VPORT_START:
4019 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
4021 case CHANNEL_TLV_VPORT_TEARDOWN:
4022 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
4024 case CHANNEL_TLV_START_RXQ:
4025 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
4027 case CHANNEL_TLV_START_TXQ:
4028 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
4030 case CHANNEL_TLV_STOP_RXQS:
4031 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
4033 case CHANNEL_TLV_STOP_TXQS:
4034 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
4036 case CHANNEL_TLV_UPDATE_RXQ:
4037 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
4039 case CHANNEL_TLV_VPORT_UPDATE:
4040 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
4042 case CHANNEL_TLV_UCAST_FILTER:
4043 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
4045 case CHANNEL_TLV_CLOSE:
4046 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
4048 case CHANNEL_TLV_INT_CLEANUP:
4049 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
4051 case CHANNEL_TLV_RELEASE:
4052 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
4054 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
4055 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
4057 case CHANNEL_TLV_COALESCE_UPDATE:
4058 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
4060 case CHANNEL_TLV_COALESCE_READ:
4061 ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
4064 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
4065 /* If we've received a message from a VF we consider malicious,
4066 * we ignore the message unless it's a RELEASE, in which
4067 * case we give it the benefit of the doubt, allowing the
4068 * next loaded driver to start again.
4070 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
4071 /* TODO - initiate FLR, remove malicious indication */
4072 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4073 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
4076 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4077 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
4078 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4081 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4082 mbx->first_tlv.tl.type,
4083 sizeof(struct pfvf_def_resp_tlv),
4084 PFVF_STATUS_MALICIOUS);
4086 /* unknown TLV - this may belong to a VF driver from the future
4087 * - a version written after this PF driver was written, which
4088 * supports features unknown as of yet. Too bad since we don't
4089 * support them. Or this may be because someone wrote a crappy
4090 * VF driver and is sending garbage over the channel.
4092 DP_NOTICE(p_hwfn, false,
4093 "VF[%02x]: unknown TLV. type %04x length %04x"
4094 " padding %08x reply address %lu\n",
4096 mbx->first_tlv.tl.type,
4097 mbx->first_tlv.tl.length,
4098 mbx->first_tlv.padding,
4099 (unsigned long)mbx->first_tlv.reply_address);
4101 /* Try replying in case reply address matches the acquisition's
4104 if (p_vf->acquire.first_tlv.reply_address &&
4105 (mbx->first_tlv.reply_address ==
4106 p_vf->acquire.first_tlv.reply_address))
4107 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4108 mbx->first_tlv.tl.type,
4109 sizeof(struct pfvf_def_resp_tlv),
4110 PFVF_STATUS_NOT_SUPPORTED);
4112 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4113 "VF[%02x]: Can't respond to TLV -"
4114 " no valid reply address\n",
4118 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
4119 mbx->first_tlv.tl.type);
4121 #ifdef CONFIG_ECORE_SW_CHANNEL
4122 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
4123 mbx->sw_mbx.response_offset = 0;
4127 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
4132 OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
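/* 'events' is a bitmap of u64 words, ECORE_VF_ARRAY_LENGTH long; bit
 * (i % 64) of word (i / 64) marks relative VF i as having a pending
 * mailbox message.
 */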
4134 ecore_for_each_vf(p_hwfn, i) {
4135 struct ecore_vf_info *p_vf;
4137 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
4138 if (p_vf->vf_mbx.b_pending_msg)
4139 events[i / 64] |= 1ULL << (i % 64);
static struct ecore_vf_info *
ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be"
			   " handled by PF\n", abs_vfid);
		return OSAL_NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}

static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 abs_vfid,
						 struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
								   abs_vfid);

	if (!p_vf)
		return ECORE_SUCCESS;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
	p_vf->vf_mbx.b_pending_msg = true;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}

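/* Illustration of the regpair packing used above: the event carries the
 * request's physical address split into two 32-bit halves, which the PF
 * recombines before later DMA-ing the request. The values in the comment
 * are hypothetical, for demonstration only.
 */
static u64 example_regpair_to_u64(const struct regpair *p_pair)
{
	/* e.g. hi = 0x00000001, lo = 0x80000000 -> 0x180000000 */
	return (((u64)p_pair->hi) << 32) | p_pair->lo;
}
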
static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
				       struct malicious_vf_eqe_data *p_data)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
	if (!p_vf)
		return;

	DP_INFO(p_hwfn,
		"VF [%d] - Malicious behavior [%02x]\n",
		p_vf->abs_vf_id, p_data->errId);

	p_vf->b_malicious = true;
	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}

enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
					   u8 opcode,
					   __le16 echo,
					   union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	case COMMON_EVENT_MALICIOUS_VF:
		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}

bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}

u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	/* Validate each candidate id in turn, not just the starting one */
	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;
out:
	return E4_MAX_NUM_VFS;
}

enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	/* DMAE length is counted in dwords, hence the division by 4 */
	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}

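/* A hedged end-to-end sketch of the PF receive path defined above: pick up
 * the pending-message bitmap, DMA each marked VF's request into the
 * PF-owned mailbox buffer, and only then parse it, so validation runs on a
 * snapshot the VF can no longer modify. The loop context is hypothetical,
 * and it assumes the dispatch routine earlier in this file is
 * ecore_iov_process_mbx_req.
 */
static void example_iov_drain_vf_mailboxes(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	u64 events[ECORE_VF_ARRAY_LENGTH];
	int i;

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	ecore_for_each_vf(p_hwfn, i) {
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		/* Copy first so parsing sees a stable, PF-owned request */
		if (ecore_iov_copy_vf_msg(p_hwfn, p_ptt, i) == ECORE_SUCCESS)
			ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
	}
}
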
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n", vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	return ECORE_SUCCESS;
}

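/* A hedged usage sketch contrasting the two MAC paths above: the forced
 * variant publishes MAC_ADDR_FORCED (and masks the VF-requested bit),
 * while the non-forced variant only takes effect when no forced MAC is in
 * place. The wrapper and its policy flag are hypothetical.
 */
static void example_set_vf_mac(struct ecore_hwfn *p_hwfn, int vfid,
			       u8 *mac, bool b_force)
{
	if (b_force)
		ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, vfid);
	else
		(void)ecore_iov_bulletin_set_mac(p_hwfn, mac, vfid);
}
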
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
	    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
	    : 0;

	return ECORE_SUCCESS;
}

void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}

void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced VLAN, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced vlan to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

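/* A hedged sketch of the pvid semantics above: a non-zero pvid publishes
 * VLAN_ADDR_FORCED through the bulletin, while pvid == 0 clears the bit
 * again, so removing the forced VLAN is just a zero write. The helper
 * name is illustrative only.
 */
static void example_clear_forced_vlan(struct ecore_hwfn *p_hwfn, int vfid)
{
	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 0, vfid);
}
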
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port,
				      u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
out:
	return rc;
}

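/* A hedged usage note on the deferral above: spoof checking may be
 * requested before the VF's vport exists; the value is then parked in
 * req_spoofchk_val and applied when the vport starts, so a caller needs
 * no ordering against VF start. Wrapper name is illustrative only.
 */
static enum _ecore_status_t example_enable_spoofchk(struct ecore_hwfn *p_hwfn,
						    int vfid)
{
	return ecore_iov_spoofchk_set(p_hwfn, vfid, true);
}
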
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
	    : ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}

void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}

#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif

bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}

u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}

enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	/* VF statistics ids follow the PF ids, hence the 0x10 offset */
	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}

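/* A hedged usage sketch: per-VF statistics become readable only once the
 * VF reaches VF_ENABLED, as enforced above. The wrapper and its caller
 * context are hypothetical.
 */
static enum _ecore_status_t
example_query_vf_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		       int vfid, struct ecore_eth_stats *p_out)
{
	/* Returns ECORE_INVAL for an invalid or not-yet-enabled VF */
	return ecore_iov_get_vf_stats(p_hwfn, p_ptt, vfid, p_out);
}
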
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->p_vf_info.ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}

bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}

/* Returns a rate in Mb/s rather than a status code */
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;