/*
 * Copyright (c) 2016 QLogic Corporation.
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_sriov.h"
#include "ecore_l2_api.h"
#include "ecore_vf.h"
#include "ecore_vfpf_if.h"
#include "ecore_status.h"
#include "ecore_int.h"
#include "ecore_mcp_api.h"
#include "ecore_vf_api.h"

static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in ecore_send_msg2pf().
	 * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
	 * must come in sequence.
	 */
	OSAL_MUTEX_ACQUIRE(&p_iov->mutex);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "preparing to send %s tlv over vf pf channel\n",
		   ecore_channel_tlvs_string[type]);

	/* Reset Request offset */
	p_iov->offset = (u8 *)(p_iov->vf2pf_request);

	/* Clear mailbox - both request and reply */
	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
				enum _ecore_status_t req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}

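/* Usage sketch (illustrative only; CHANNEL_TLV_FOO is a hypothetical type):
 * every VF -> PF request in this file follows the same prep/send/end pattern:
 *
 *	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_FOO, sizeof(*req));
 *	... fill request fields ...
 *	ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	... check rc and resp->hdr.status ...
 *	ecore_vf_pf_req_end(p_hwfn, rc);
 *
 * The mutex taken in ecore_vf_pf_prep() is only released by
 * ecore_vf_pf_req_end(), so the calls must always be paired.
 */
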
static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
			     u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = ECORE_SUCCESS, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	ecore_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel) {
		rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
					 done,
					 p_req,
					 p_hwfn->vf_iov_info->pf2vf_reply,
					 sizeof(union vfpf_tlvs), resp_size);
		/* TODO - no prints about message ? */
		return rc;
	}

	/* Send TLVs over HW channel */
	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
		   " %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
	       *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		OSAL_MSLEEP(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = ECORE_TIMEOUT;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
					    struct vf_pf_resc_request *p_req,
					    struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x]"
		   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
		   " vlan [%02x/%02x] mc [%02x/%02x]."
		   " Try PF recommended amount\n",
		   p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
		   p_req->num_sbs, p_resp->num_sbs,
		   p_req->num_mac_filters, p_resp->num_mac_filters,
		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct ecore_vf_acquire_sw_info vf_sw_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int attempts = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;

	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);

	req->vfdev_info.os_type = vf_sw_info.os_type;
	req->vfdev_info.driver_version = vf_sw_info.driver_version;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		OSAL_MEMSET(p_iov->pf2vf_reply, 0,
			    sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = ecore_send_msg2pf(p_hwfn,
				       &resp->hdr.status, sizeof(*resp));
		if (rc != ECORE_SUCCESS)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		OSAL_MEMCPY(&p_iov->acquire_resp,
			    resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "resources acquired\n");
			resources_acquired = true;
		} /* PF refuses to allocate our resources */
		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			 attempts < VF_ACQUIRE_THRESH) {
			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
							&resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn, false,
					  "PF uses an incompatible fastpath HSI"
					  " %02x.%02x [VF requires %02x.%02x]."
					  " Please change to a VF driver using"
					  " %02x.xx\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = ECORE_INVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn, false,
						  "PF uses very old drivers."
						  " Please change to a VF"
						  " driver using no later than"
						  " 8.8.x.x.x\n");
					rc = ECORE_INVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to"
						" see if it supports FW-version"
						" override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF rejected acquisition by VF\n");
			rc = ECORE_INVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned err %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = ECORE_AGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
			  " status = 0x%x.\n", rc);
		rc = ECORE_AGAIN;
		goto exit;
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
	p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;

	DP_INFO(p_hwfn, "Chip details - %s%d\n",
		ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);

	p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_INFO(p_hwfn, "100g VF\n");
			p_hwfn->p_dev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI;"
			" %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR,
			resp->pfdev_info.minor_fp_hsi);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
	    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
	if (!p_iov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						       &p_iov->vf2pf_request_phys,
						       sizeof(union vfpf_tlvs));
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						     &p_iov->pf2vf_reply_phys,
						     sizeof(union pfvf_tlvs));
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys]\n",
		   p_iov->vf2pf_request,
		   (unsigned long)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply,
		   (unsigned long)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
	p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
							 &p_iov->bulletin.phys,
							 p_iov->bulletin.size);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
		   p_iov->bulletin.size);

	OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
	OSAL_MUTEX_INIT(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = ECORE_PCI_ETH;

	return ecore_vf_pf_acquire(p_hwfn);

free_vf2pf_request:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
			       p_iov->vf2pf_request_phys,
			       sizeof(union vfpf_tlvs));
free_p_iov:
	OSAL_FREE(p_hwfn->p_dev, p_iov);

	return ECORE_NOMEM;
}

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

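/* Sketch (for illustration, based on the legacy path below): when the PF is
 * pre-fastpath-HSI ("legacy"), the VF derives the Rx producer location in its
 * own BAR rather than taking it from the START_RXQ response, roughly as
 *
 *	prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
 *	       MSTORM_QZONE_START(p_hwfn->p_dev) +
 *	       hw_qid * MSTORM_QZONE_SIZE;
 *
 * A modern PF instead returns the offset explicitly (resp->offset), as used
 * in ecore_vf_pf_rxq_start() below.
 */
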
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
					   u8 rx_qid,
					   u16 sb,
					   u8 sb_index,
					   u16 bd_max_bytes,
					   dma_addr_t bd_chain_phys_addr,
					   dma_addr_t cqe_pbl_addr,
					   u16 cqe_pbl_size,
					   void OSAL_IOMEM **pp_prod)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1; /* Keep initialized, for future compatibility */

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (pp_prod && p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->p_dev) +
			   (hw_qid) * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (pp_prod && !p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0.
		 * It was actually the PF's responsibility, but since some
		 * old PFs might fail to do so, we do this as well.
		 */
		OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
					  u16 rx_qid, bool cqe_completion)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
					   u16 tx_queue_id,
					   u16 sb,
					   u8 sb_index,
					   dma_addr_t pbl_addr,
					   u16 pbl_size,
					   void OSAL_IOMEM **pp_doorbell)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
			       resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];

		*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
			       DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   tx_queue_id, *pp_doorbell, resp->offset);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
					     u16 rx_queue_id,
					     u8 num_rxqs,
					     u8 comp_cqe_flg, u8 comp_event_flg)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	struct vfpf_update_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));

	req->rx_qid = rx_queue_id;
	req->num_rxqs = num_rxqs;

	if (comp_cqe_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
	if (comp_event_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t
ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
			u16 mtu, u8 inner_vlan_removal,
			enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
			u8 only_untagged)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;
	int i;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

static bool
ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
#ifndef ASIC_ONLY
		/* FPGA doesn't have PVFC and so can't support tx-switching */
		return !!(p_data->update_tx_switching_flg &&
			  !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
#else
		return !!p_data->update_tx_switching_flg;
#endif
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
			tlv, ecore_channel_tlvs_string[tlv]);
		return false;
	}
}

static void
ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
	     tlv++) {
		if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
		    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV[%d] type %s Configuration %s\n",
				   tlv, ecore_channel_tlvs_string[tlv],
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

enum _ecore_status_t
ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
			 struct ecore_sp_vport_update_params *p_params)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	enum _ecore_status_t rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					  size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_inner_vlan_removal_flg) {
		struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
		p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
					   size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
						tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					    CHANNEL_TLV_VPORT_UPDATE_MCAST,
					    size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
			    sizeof(unsigned long) *
			    ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct ecore_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
			    sizeof(rss_params->rss_ind_table));
		OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
			    sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					       tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	if (p_params->sge_tpa_params) {
		struct ecore_sge_tpa_params *sge_tpa_params;
		struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;

		sge_tpa_params = p_params->sge_tpa_params;
		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
		p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
					      size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (sge_tpa_params->update_tpa_en_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_EN_FLAG;
		if (sge_tpa_params->update_tpa_param_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_PARAM_FLAG;

		if (sge_tpa_params->tpa_ipv4_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
		if (sge_tpa_params->tpa_ipv6_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
		if (sge_tpa_params->tpa_pkt_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
		if (sge_tpa_params->tpa_hdr_data_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_HDR_DATA_SPLIT_FLAG;
		if (sge_tpa_params->tpa_gro_consistent_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_GRO_CONSIST_FLAG;

		p_sge_tpa_tlv->tpa_max_aggs_num =
		    sge_tpa_params->tpa_max_aggs_num;
		p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
		p_sge_tpa_tlv->tpa_min_size_to_start =
		    sge_tpa_params->tpa_min_size_to_start;
		p_sge_tpa_tlv->tpa_min_size_to_cont =
		    sge_tpa_params->tpa_min_size_to_cont;

		p_sge_tpa_tlv->max_buffers_per_cqe =
		    sge_tpa_params->max_buffers_per_cqe;
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_AGAIN;

	ecore_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->vf2pf_request,
				       p_iov->vf2pf_request_phys,
				       sizeof(union vfpf_tlvs));
	if (p_iov->pf2vf_reply)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->pf2vf_reply,
				       p_iov->pf2vf_reply_phys,
				       sizeof(union pfvf_tlvs));

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct ecore_bulletin_content);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->bulletin.p_virt,
				       p_iov->bulletin.phys, size);
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = OSAL_NULL;

	return rc;
}

void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
			      struct ecore_filter_mcast *p_filter_cmd)
{
	struct ecore_sp_vport_update_params sp_params;
	int i;

	OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			OSAL_SET_BIT(bit, sp_params.bins);
		}
	}

	ecore_vf_pf_vport_update(p_hwfn, &sp_params);
}

enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
					      struct ecore_filter_ucast
					      *p_ucast)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* Sanitize */
	if (p_ucast->opcode == ECORE_FILTER_MOVE) {
		DP_NOTICE(p_hwfn, true,
			  "VFs don't support Moving of filters\n");
		return ECORE_INVAL;
	}

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
			   u16 sb_id)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
					    u8 *p_change)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct ecore_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return ECORE_SUCCESS;

	/* Verify the bulletin we see is valid */
	crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
			  p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return ECORE_AGAIN;

	/* Set the shadow bulletin and process it */
	OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return ECORE_SUCCESS;
}

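/* Polling sketch (assumption, outside this file's scope): a caller is expected
 * to sample the bulletin periodically and, on a change, refresh its view from
 * the shadow copy via the getters below, e.g.:
 *
 *	struct ecore_mcp_link_state link;
 *	u8 change = 0;
 *
 *	if (ecore_vf_read_bulletin(p_hwfn, &change) == ECORE_SUCCESS && change)
 *		ecore_vf_get_link_state(p_hwfn, &link);
 */
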
void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
				struct ecore_mcp_link_params *p_params,
				struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_params *params)
{
	__ecore_vf_get_link_params(p_hwfn, params,
				   &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			       struct ecore_mcp_link_state *p_link,
			       struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			     struct ecore_mcp_link_state *link)
{
	__ecore_vf_get_link_state(p_hwfn, link,
				  &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_capabilities *p_link_caps,
			      struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			    struct ecore_mcp_link_capabilities *p_link_caps)
{
	__ecore_vf_get_link_caps(p_hwfn, p_link_caps,
				 &p_hwfn->vf_iov_info->bulletin_shadow);
}

void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
{
	OSAL_MEMCPY(port_mac,
		    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
		    ETH_ALEN);
}

void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
				   u8 *num_vlan_filters)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
				  u32 *num_mac)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_mac = p_vf->acquire_resp.resc.num_mac_filters;
}

void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
			  u32 *num_sbs)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
}

bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
		return false;

	return true;
}

bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
				      u8 *p_is_forced)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);

	return true;
}

bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return false;

	*dst_pvid = bulletin->pvid;

	return true;
}

bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->vf_iov_info->b_pre_fp_hsi;
}

void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
			     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
			     u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}