/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_sriov.h"
#include "ecore_l2_api.h"
#include "ecore_vf.h"
#include "ecore_vfpf_if.h"
#include "ecore_status.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_l2.h"
#include "ecore_mcp_api.h"
#include "ecore_vf_api.h"

static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in ecore_send_msg2pf().
	 * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
	 * must come in sequence.
	 */
	OSAL_MUTEX_ACQUIRE(&p_iov->mutex);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "preparing to send %s tlv over vf pf channel\n",
		   ecore_channel_tlvs_string[type]);

	/* Reset Request offset */
	p_iov->offset = (u8 *)(p_iov->vf2pf_request);

	/* Clear mailbox - both request and reply */
	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

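/* Every request helper in this file follows the same pattern; an
 * illustrative sketch (CHANNEL_TLV_FOO is a placeholder, not a real
 * TLV type):
 *
 *	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_FOO, sizeof(*req));
 *	// ... fill request fields ...
 *	ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	// ... check rc and resp->hdr.status ...
 *	ecore_vf_pf_req_end(p_hwfn, rc);
 *
 * ecore_vf_pf_prep() takes the channel mutex and ecore_vf_pf_req_end()
 * releases it, so the two must always be paired.
 */
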
static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
				enum _ecore_status_t req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}

static enum _ecore_status_t
ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
		  u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	ecore_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel) {
		rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
					 done,
					 p_req,
					 p_hwfn->vf_iov_info->pf2vf_reply,
					 sizeof(union vfpf_tlvs), resp_size);
		/* TODO - no prints about message ? */
		return rc;
	}

	/* Send TLVs over HW channel */
	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
		   " %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
	       *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		OSAL_MSLEEP(25);
		time--;
	}

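	/* Worst case this polls for ~100 * 25 msec = 2.5 seconds (per the
	 * budget set above); a PF reply flips `done' and ends the wait early.
	 */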
	if (!*done) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = ECORE_TIMEOUT;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
					    struct vf_pf_resc_request *p_req,
					    struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x]"
		   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
		   " vlan [%02x/%02x] mc [%02x/%02x]."
		   " Try PF recommended amount\n",
		   p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
		   p_req->num_sbs, p_resp->num_sbs,
		   p_req->num_mac_filters, p_resp->num_mac_filters,
		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct ecore_vf_acquire_sw_info vf_sw_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int attempts = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;

	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);

	req->vfdev_info.os_type = vf_sw_info.os_type;
	req->vfdev_info.driver_version = vf_sw_info.driver_version;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		OSAL_MEMSET(p_iov->pf2vf_reply, 0,
			    sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = ecore_send_msg2pf(p_hwfn,
				       &resp->hdr.status, sizeof(*resp));

		/* PF timeout */
		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		OSAL_MEMCPY(&p_iov->acquire_resp,
			    resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
					VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "resources acquired\n");
			resources_acquired = true;
		} /* PF refuses to allocate our resources */
		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			 attempts < VF_ACQUIRE_THRESH) {
			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
							&resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn, false,
					  "PF uses an incompatible fastpath HSI"
					  " %02x.%02x [VF requires %02x.%02x]."
					  " Please change to a VF driver using"
					  " %02x.xx\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = ECORE_INVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn, false,
						  "PF uses very old drivers."
						  " Please change to a VF"
						  " driver using no later than"
						  " 8.8.x.x.\n");
					rc = ECORE_INVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to"
						" see if it supports FW-version"
						" override\n");
					req->vfdev_info.capabilities |=
						VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * it's reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF rejected acquisition by VF\n");
			rc = ECORE_INVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned err %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = ECORE_AGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
			  " status = 0x%x.\n",
			  rc);
		rc = ECORE_AGAIN;
		goto exit;
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
	p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;

	DP_INFO(p_hwfn, "Chip details - %s%d\n",
		ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);

	p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_INFO(p_hwfn, "100g VF\n");
			p_hwfn->p_dev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI;"
			" %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR,
			resp->pfdev_info.minor_fp_hsi);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

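/* Acquire negotiation in short: the VF first requests the maximum resources
 * it can use; on PFVF_STATUS_NO_RESOURCE it humbles the request to the
 * PF-recommended amounts and retries (at most VF_ACQUIRE_THRESH attempts),
 * and on PFVF_STATUS_NOT_SUPPORTED it may retry once more advertising
 * VFPF_ACQUIRE_CAP_PRE_FP_HSI to accommodate older PF drivers.
 */
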
enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
	    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
	if (!p_iov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						       &p_iov->vf2pf_request_phys,
						       sizeof(union vfpf_tlvs));
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						     &p_iov->pf2vf_reply_phys,
						     sizeof(union pfvf_tlvs));
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys]\n",
		   p_iov->vf2pf_request,
		   (unsigned long)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply,
		   (unsigned long)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
	p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
							 &p_iov->bulletin.phys,
							 p_iov->bulletin.size);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
		   p_iov->bulletin.size);

	OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
	OSAL_MUTEX_INIT(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = ECORE_PCI_ETH;

	return ecore_vf_pf_acquire(p_hwfn);

free_vf2pf_request:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
			       p_iov->vf2pf_request_phys,
			       sizeof(union vfpf_tlvs));
free_p_iov:
	OSAL_FREE(p_hwfn->p_dev, p_iov);

	return ECORE_NOMEM;
}

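/* Rough VF bring-up order, for orientation (a sketch, not a strict
 * contract): ecore_vf_hw_prepare() maps the VF BAR resources and ends by
 * acquiring resources from the PF via ecore_vf_pf_acquire(); afterwards
 * the driver typically starts a vport with ecore_vf_pf_vport_start() and
 * queues with ecore_vf_pf_rxq_start()/ecore_vf_pf_txq_start().
 */
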
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

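/* Producer addresses for legacy PFs are derived from these zone macros:
 * the producer of hw queue `q' lives at
 * regview + MSTORM_QZONE_START(dev) + q * MSTORM_QZONE_SIZE,
 * i.e., the MSTORM zones start immediately after all TSTORM zones.
 * See the legacy branch in ecore_vf_pf_rxq_start() below.
 */
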
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
					   u8 rx_qid,
					   u16 sb,
					   u8 sb_index,
					   u16 bd_max_bytes,
					   dma_addr_t bd_chain_phys_addr,
					   dma_addr_t cqe_pbl_addr,
					   u16 cqe_pbl_size,
					   void OSAL_IOMEM **pp_prod)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1; /* Keep initialized, for future compatibility */

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (pp_prod && p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->p_dev) +
			   (hw_qid) * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (pp_prod && !p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0.
		 * It was actually the PF's responsibility, but since some
		 * old PFs might fail to do so, we do this as well.
		 */
		OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
					  u16 rx_qid, bool cqe_completion)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
					   u16 tx_queue_id,
					   u16 sb,
					   u8 sb_index,
					   dma_addr_t pbl_addr,
					   u16 pbl_size,
					   void OSAL_IOMEM **pp_doorbell)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
			       resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];

		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
			       DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   tx_queue_id, *pp_doorbell, resp->offset);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
					     u16 rx_queue_id,
					     u8 num_rxqs,
					     u8 comp_cqe_flg, u8 comp_event_flg)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	struct vfpf_update_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));

	req->rx_qid = rx_queue_id;
	req->num_rxqs = num_rxqs;

	if (comp_cqe_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
	if (comp_event_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t
ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
			u16 mtu, u8 inner_vlan_removal,
			enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
			u8 only_untagged)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;
	int i;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

static bool
ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
#ifndef ASIC_ONLY
		/* FPGA doesn't have PVFC and so can't support tx-switching */
		return !!(p_data->update_tx_switching_flg &&
			  !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
#else
		return !!p_data->update_tx_switching_flg;
#endif
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
			tlv, ecore_channel_tlvs_string[tlv]);
		return false;
	}
}

static void
ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
	     tlv++) {
		if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
		    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV[%d] type %s Configuration %s\n",
				   tlv, ecore_channel_tlvs_string[tlv],
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

enum _ecore_status_t
ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
			 struct ecore_sp_vport_update_params *p_params)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	enum _ecore_status_t rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					  size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_inner_vlan_removal_flg) {
		struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
		p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
					   size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
						tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					    CHANNEL_TLV_VPORT_UPDATE_MCAST,
					    size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
			    sizeof(unsigned long) *
			    ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct ecore_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
			    sizeof(rss_params->rss_ind_table));
		OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
			    sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					       tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	if (p_params->sge_tpa_params) {
		struct ecore_sge_tpa_params *sge_tpa_params;
		struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;

		sge_tpa_params = p_params->sge_tpa_params;
		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
		p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
					      size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (sge_tpa_params->update_tpa_en_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_EN_FLAG;
		if (sge_tpa_params->update_tpa_param_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_PARAM_FLAG;

		if (sge_tpa_params->tpa_ipv4_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
		if (sge_tpa_params->tpa_ipv6_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
		if (sge_tpa_params->tpa_pkt_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
		if (sge_tpa_params->tpa_hdr_data_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_HDR_DATA_SPLIT_FLAG;
		if (sge_tpa_params->tpa_gro_consistent_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_GRO_CONSIST_FLAG;

		p_sge_tpa_tlv->tpa_max_aggs_num =
		    sge_tpa_params->tpa_max_aggs_num;
		p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
		p_sge_tpa_tlv->tpa_min_size_to_start =
		    sge_tpa_params->tpa_min_size_to_start;
		p_sge_tpa_tlv->tpa_min_size_to_cont =
		    sge_tpa_params->tpa_min_size_to_cont;

		p_sge_tpa_tlv->max_buffers_per_cqe =
		    sge_tpa_params->max_buffers_per_cqe;
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

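/* Note on the resp_size bookkeeping above: the PF replies to a vport-update
 * with one pfvf_def_resp_tlv per extended TLV that was sent, so every
 * conditional block grows the expected reply size by that amount.
 */
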
enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_AGAIN;

	ecore_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->vf2pf_request,
				       p_iov->vf2pf_request_phys,
				       sizeof(union vfpf_tlvs));
	if (p_iov->pf2vf_reply)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->pf2vf_reply,
				       p_iov->pf2vf_reply_phys,
				       sizeof(union pfvf_tlvs));

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct ecore_bulletin_content);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->bulletin.p_virt,
				       p_iov->bulletin.phys, size);
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = OSAL_NULL;

	return rc;
}

void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
			      struct ecore_filter_mcast *p_filter_cmd)
{
	struct ecore_sp_vport_update_params sp_params;
	int i;

	OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			OSAL_SET_BIT(bit, sp_params.bins);
		}
	}

	ecore_vf_pf_vport_update(p_hwfn, &sp_params);
}

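/* VF multicast filtering is approximate: each MAC is hashed to a bin by
 * ecore_mcast_bin_from_mac() and only the bin bitmap travels to the PF
 * via the vport-update message above.
 */
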
enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
					      struct ecore_filter_ucast
					      *p_ucast)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* Sanitize */
	if (p_ucast->opcode == ECORE_FILTER_MOVE) {
		DP_NOTICE(p_hwfn, true,
			  "VFs don't support Moving of filters\n");
		return ECORE_INVAL;
	}

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
			   u16 sb_id)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
					    u8 *p_change)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct ecore_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return ECORE_SUCCESS;

	/* Verify the bulletin we see is valid */
	crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
			  p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return ECORE_AGAIN;

	/* Set the shadow bulletin and process it */
	OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return ECORE_SUCCESS;
}

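/* Bulletin validity: the PF bumps `version' and computes `crc' over the
 * bulletin contents past the crc field itself, which is why the check
 * above hashes from (u8 *)&shadow + crc_size over (size - crc_size) bytes.
 */
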
void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
				struct ecore_mcp_link_params *p_params,
				struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_params *params)
{
	__ecore_vf_get_link_params(p_hwfn, params,
				   &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			       struct ecore_mcp_link_state *p_link,
			       struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			     struct ecore_mcp_link_state *link)
{
	__ecore_vf_get_link_state(p_hwfn, link,
				  &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_capabilities *p_link_caps,
			      struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			    struct ecore_mcp_link_capabilities *p_link_caps)
{
	__ecore_vf_get_link_caps(p_hwfn, p_link_caps,
				 &p_hwfn->vf_iov_info->bulletin_shadow);
}

void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
{
	OSAL_MEMCPY(port_mac,
		    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
		    ETH_ALEN);
}

void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
				   u8 *num_vlan_filters)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
				  u32 *num_mac)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_mac = p_vf->acquire_resp.resc.num_mac_filters;
}

void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
			  u32 *num_sbs)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
}

bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
		return false;

	return true;
}

bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
				      u8 *p_is_forced)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);

	return true;
}

bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return false;

	*dst_pvid = bulletin->pvid;

	return true;
}

bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->vf_iov_info->b_pre_fp_hsi;
}

void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
			     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
			     u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}