/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_sriov.h"
#include "ecore_l2_api.h"
#include "ecore_vf.h"
#include "ecore_vfpf_if.h"
#include "ecore_status.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_l2.h"
#include "ecore_mcp_api.h"
#include "ecore_vf_api.h"

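/* The VF -> PF channel is a simple request/response mailbox:
 * ecore_vf_pf_prep() locks the channel and builds the request TLVs,
 * ecore_send_msg2pf() posts the request to the PF and polls for the
 * reply, and ecore_vf_pf_req_end() logs the outcome and releases the
 * channel lock.
 */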
static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in ecore_send_msg2pf().
	 * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
	 * must come in sequence.
	 */
	OSAL_MUTEX_ACQUIRE(&p_iov->mutex);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "preparing to send %s tlv over vf pf channel\n",
		   ecore_channel_tlvs_string[type]);

	/* Reset Request offset */
	p_iov->offset = (u8 *)(p_iov->vf2pf_request);

	/* Clear mailbox - both request and reply */
	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
				enum _ecore_status_t req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}

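/* Requests are posted either via an OSAL-provided software channel
 * (when p_dev->b_hw_channel is not set) or over the HW channel: the
 * request's DMA address is written into the USDM VF zone, the trigger
 * register is rung, and `done' is polled for the PF's completion.
 */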
static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
			     u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = ECORE_SUCCESS, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	ecore_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel) {
		rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
					 done,
					 p_req,
					 p_hwfn->vf_iov_info->pf2vf_reply,
					 sizeof(union vfpf_tlvs), resp_size);
		/* TODO - no prints about message ? */
		return rc;
	}

	/* Send TLVs over HW channel */
	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
		   " %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
	       *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		OSAL_MSLEEP(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = ECORE_TIMEOUT;
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF response: %d [Type %d]\n",
		   *done, p_req->first_tlv.tl.type);

	return rc;
}

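/* Number of times the VF retries ACQUIRE with the PF-recommended
 * (reduced) resource amounts before treating PFVF_STATUS_NO_RESOURCE
 * as an error.
 */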
#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
					    struct vf_pf_resc_request *p_req,
					    struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x]"
		   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
		   " vlan [%02x/%02x] mc [%02x/%02x]."
		   " Try PF recommended amount\n",
		   p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
		   p_req->num_sbs, p_resp->num_sbs,
		   p_req->num_mac_filters, p_resp->num_mac_filters,
		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct ecore_vf_acquire_sw_info vf_sw_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int attempts = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;

	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);

	req->vfdev_info.os_type = vf_sw_info.os_type;
	req->vfdev_info.driver_version = vf_sw_info.driver_version;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
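
	/* Negotiate with the PF: on PFVF_STATUS_NO_RESOURCE retry with the
	 * PF-recommended amounts (up to VF_ACQUIRE_THRESH times); on
	 * PFVF_STATUS_NOT_SUPPORTED fall back to pre-FP-HSI compatibility
	 * mode when the PF is old enough to require it.
	 */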
	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		OSAL_MEMSET(p_iov->pf2vf_reply, 0,
			    sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = ecore_send_msg2pf(p_hwfn,
				       &resp->hdr.status, sizeof(*resp));

		/* PF timeout - release the channel lock before bailing */
		if (rc != ECORE_SUCCESS)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		OSAL_MEMCPY(&p_iov->acquire_resp,
			    resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "resources acquired\n");
			resources_acquired = true;
		} /* PF refuses to allocate our resources */
		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			 attempts < VF_ACQUIRE_THRESH) {
			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
							&resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn, false,
					  "PF uses an incompatible fastpath HSI"
					  " %02x.%02x [VF requires %02x.%02x]."
					  " Please change to a VF driver using"
					  " %02x.xx\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = ECORE_INVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn, false,
						  "PF uses very old drivers."
						  " Please change to a VF"
						  " driver using no later than"
						  " 8.8.x.x\n");
					rc = ECORE_INVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to"
						" see if it supports FW-version"
						" override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
				}
			}
		} else {
			DP_ERR(p_hwfn,
			       "PF returned err %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = ECORE_AGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
			  " status = 0x%x\n", rc);
		rc = ECORE_AGAIN;
		goto exit;
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
	p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;

	DP_INFO(p_hwfn, "Chip details - %s%d\n",
		ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);

	p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_INFO(p_hwfn, "100g VF\n");
			p_hwfn->p_dev->num_hwfns = 2;
		}
	}

	/* An older PF minor fastpath HSI is acceptable as long as the major
	 * versions match; note it unless we're already in legacy mode.
	 */
	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI;"
			" %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR,
			resp->pfdev_info.minor_fp_hsi);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
	    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
	if (!p_iov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						       &p_iov->vf2pf_request_phys,
						       sizeof(union vfpf_tlvs));
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						     &p_iov->pf2vf_reply_phys,
						     sizeof(union pfvf_tlvs));
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys]\n",
		   p_iov->vf2pf_request,
		   (unsigned long)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply,
		   (unsigned long)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
	p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
							 &p_iov->bulletin.phys,
							 p_iov->bulletin.size);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
		   p_iov->bulletin.size);

	OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
	OSAL_MUTEX_INIT(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = ECORE_PCI_ETH;

	return ecore_vf_pf_acquire(p_hwfn);

free_vf2pf_request:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
			       p_iov->vf2pf_request_phys,
			       sizeof(union vfpf_tlvs));
free_p_iov:
	OSAL_FREE(p_hwfn->p_dev, p_iov);

	return ECORE_NOMEM;
}

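/* Storm queue-zone layout within the VF BAR0; used to compute the Rx
 * producer address locally when the PF is too old to report it.
 */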
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
					   u8 rx_qid,
					   u16 sb,
					   u8 sb_index,
					   u16 bd_max_bytes,
					   dma_addr_t bd_chain_phys_addr,
					   dma_addr_t cqe_pbl_addr,
					   u16 cqe_pbl_size,
					   void OSAL_IOMEM **pp_prod)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1; /* Keep initialized, for future compatibility */

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (pp_prod && p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->p_dev) +
			   (hw_qid) * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (pp_prod && !p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0.
		 * It was actually the PF's responsibility, but since some
		 * old PFs might fail to do so, we do this as well.
		 */
		OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
					  u16 rx_qid, bool cqe_completion)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
					   u16 tx_queue_id,
					   u16 sb,
					   u8 sb_index,
					   dma_addr_t pbl_addr,
					   u16 pbl_size,
					   void OSAL_IOMEM **pp_doorbell)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
			       resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];

		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
			       DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   tx_queue_id, *pp_doorbell, resp->offset);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
					     u16 rx_queue_id,
					     u8 num_rxqs,
					     u8 comp_cqe_flg, u8 comp_event_flg)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	struct vfpf_update_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));

	req->rx_qid = rx_queue_id;
	req->num_rxqs = num_rxqs;

	if (comp_cqe_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
	if (comp_event_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t
ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
			u16 mtu, u8 inner_vlan_removal,
			enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
			u8 only_untagged)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;
	int i;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

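/* Decide, per vport-update TLV type, whether the caller's parameters
 * actually require that extension TLV to be sent to the PF.
 */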
static bool
ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
#ifndef ASIC_ONLY
		/* FPGA doesn't have PVFC and so can't support tx-switching */
		return !!(p_data->update_tx_switching_flg &&
			  !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
#else
		return !!p_data->update_tx_switching_flg;
#endif
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
			tlv, ecore_channel_tlvs_string[tlv]);
		return false;
	}
}

static void
ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
	     tlv++) {
		if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
		    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV[%d] type %s Configuration %s\n",
				   tlv, ecore_channel_tlvs_string[tlv],
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

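/* A vport-update request is built as the base TLV plus one extension
 * TLV per requested change; each extension also grows the expected
 * response size by one default response TLV.
 */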
enum _ecore_status_t
ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
			 struct ecore_sp_vport_update_params *p_params)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	enum _ecore_status_t rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					  size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_inner_vlan_removal_flg) {
		struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
		p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
					   size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
						tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					    CHANNEL_TLV_VPORT_UPDATE_MCAST,
					    size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
			    sizeof(unsigned long) *
			    ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct ecore_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
			    sizeof(rss_params->rss_ind_table));
		OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
			    sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					       tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	if (p_params->sge_tpa_params) {
		struct ecore_sge_tpa_params *sge_tpa_params;
		struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;

		sge_tpa_params = p_params->sge_tpa_params;
		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
		p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
					      size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (sge_tpa_params->update_tpa_en_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_EN_FLAG;
		if (sge_tpa_params->update_tpa_param_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_PARAM_FLAG;

		if (sge_tpa_params->tpa_ipv4_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
		if (sge_tpa_params->tpa_ipv6_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
		if (sge_tpa_params->tpa_pkt_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
		if (sge_tpa_params->tpa_hdr_data_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_HDR_DATA_SPLIT_FLAG;
		if (sge_tpa_params->tpa_gro_consistent_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_GRO_CONSIST_FLAG;

		p_sge_tpa_tlv->tpa_max_aggs_num =
		    sge_tpa_params->tpa_max_aggs_num;
		p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
		p_sge_tpa_tlv->tpa_min_size_to_start =
		    sge_tpa_params->tpa_min_size_to_start;
		p_sge_tpa_tlv->tpa_min_size_to_cont =
		    sge_tpa_params->tpa_min_size_to_cont;

		p_sge_tpa_tlv->max_buffers_per_cqe =
		    sge_tpa_params->max_buffers_per_cqe;
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_AGAIN;

	ecore_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->vf2pf_request,
				       p_iov->vf2pf_request_phys,
				       sizeof(union vfpf_tlvs));
	if (p_iov->pf2vf_reply)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->pf2vf_reply,
				       p_iov->pf2vf_reply_phys,
				       sizeof(union pfvf_tlvs));

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct ecore_bulletin_content);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->bulletin.p_virt,
				       p_iov->bulletin.phys, size);
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = OSAL_NULL;

	return rc;
}

void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
			      struct ecore_filter_mcast *p_filter_cmd)
{
	struct ecore_sp_vport_update_params sp_params;
	int i;

	OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			OSAL_SET_BIT(bit, sp_params.bins);
		}
	}

	ecore_vf_pf_vport_update(p_hwfn, &sp_params);
}

enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
					      struct ecore_filter_ucast
					      *p_ucast)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* Sanitize */
	if (p_ucast->opcode == ECORE_FILTER_MOVE) {
		DP_NOTICE(p_hwfn, true,
			  "VFs don't support Moving of filters\n");
		return ECORE_INVAL;
	}

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
			   u16 sb_id)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

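/* The PF publishes link and filter state to the VF through the bulletin
 * board; the VF copies it out and accepts it only if the version has
 * advanced and the CRC over the content (excluding the CRC field itself)
 * matches.
 */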
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
					    u8 *p_change)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct ecore_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return ECORE_SUCCESS;

	/* Verify the bulletin we see is valid */
	crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
			  p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return ECORE_AGAIN;

	/* Set the shadow bulletin and process it */
	OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return ECORE_SUCCESS;
}

void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
				struct ecore_mcp_link_params *p_params,
				struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_params *params)
{
	__ecore_vf_get_link_params(p_hwfn, params,
				   &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			       struct ecore_mcp_link_state *p_link,
			       struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			     struct ecore_mcp_link_state *link)
{
	__ecore_vf_get_link_state(p_hwfn, link,
				  &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_capabilities *p_link_caps,
			      struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			    struct ecore_mcp_link_capabilities *p_link_caps)
{
	__ecore_vf_get_link_caps(p_hwfn, p_link_caps,
				 &p_hwfn->vf_iov_info->bulletin_shadow);
}

void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
{
	OSAL_MEMCPY(port_mac,
		    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
		    ETH_ALEN);
}

void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
				   u8 *num_vlan_filters)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
				  u32 *num_mac)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_mac = p_vf->acquire_resp.resc.num_mac_filters;
}

void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
			  u32 *num_sbs)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
}

bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
		return false;

	return true;
}

bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
				      u8 *p_is_forced)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);

	return true;
}

bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return false;

	if (dst_pvid)
		*dst_pvid = bulletin->pvid;

	return true;
}

bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->vf_iov_info->b_pre_fp_hsi;
}

void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
			     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
			     u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}