2 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
4 * Copyright (c) 2015 QLogic Corporation.
8 * See LICENSE.bnx2x_pmd for copyright and licensing details.
13 /* calculate the crc in the bulletin board */
14 static inline uint32_t
15 bnx2x_vf_crc(struct bnx2x_vf_bulletin *bull)
/* CRC is computed over the bulletin payload, skipping the leading crc
 * field itself; bull->length includes the crc field, hence crc_sz is
 * subtracted from both the start offset and the length.
 */
17 uint32_t crc_sz = sizeof(bull->crc), length = bull->length - crc_sz;
19 return ECORE_CRC32_LE(0, (uint8_t *)bull + crc_sz, length);
22 /* Checks are there mac/channel updates for VF
23 * returns TRUE if something was updated
/* Reads the PF-to-VF bulletin board, validates its CRC (retrying up to
 * BNX2X_VF_BULLETIN_TRIES times since the PF may be mid-update), and
 * copies the new bulletin into sc->old_bulletin on success.
 */
26 bnx2x_check_bull(struct bnx2x_softc *sc)
28 struct bnx2x_vf_bulletin *bull;
30 uint16_t old_version = sc->old_bulletin.version;
31 uint64_t valid_bitmap;
33 bull = sc->pf2vf_bulletin;
/* Version unchanged => PF published nothing new since our last copy. */
34 if (old_version == bull->version) {
37 /* Check the crc until we get the correct data */
38 while (tries < BNX2X_VF_BULLETIN_TRIES) {
/* Re-read each iteration: the board is DMA'd by the PF and may change
 * under us; a matching CRC means we captured a consistent snapshot.
 */
39 bull = sc->pf2vf_bulletin;
40 if (bull->crc == bnx2x_vf_crc(bull))
43 PMD_DRV_LOG(ERR, "bad crc on bulletin board. contained %x computed %x",
44 bull->crc, bnx2x_vf_crc(bull))
47 if (tries == BNX2X_VF_BULLETIN_TRIES) {
48 PMD_DRV_LOG(ERR, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
54 valid_bitmap = bull->valid_bitmap;
56 /* check the mac address and VLAN and allocate memory if valid */
/* New PF-assigned MAC overrides our link_params MAC. */
57 if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN))
58 rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
/* NOTE(review): copy direction looks inverted vs. the MAC case above --
 * this writes the OLD bulletin vlan INTO the live board. Confirm against
 * upstream intent before changing.
 */
59 if (valid_bitmap & (1 << VLAN_VALID))
60 rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, VLAN_HLEN);
/* Cache the validated snapshot for the next version comparison. */
62 sc->old_bulletin = *bull;
67 /* place a given tlv on the tlv buffer at a given offset */
/* Writes a channel TLV header (type/length) at tlvs_list + offset.
 * sc is unused here (kept for a uniform call signature).
 */
69 bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list,
70 uint16_t offset, uint16_t type, uint16_t length)
72 struct channel_tlv *tl = (struct channel_tlv *)
73 ((unsigned long)tlvs_list + offset);
79 /* Initiliaze header of the first tlv and clear mailbox*/
/* Begins a VF->PF request: takes the vf2pf spinlock (released later by
 * bnx2x_vf_finalize), zeroes the shared mailbox, and writes the first
 * TLV header. Callers must always pair this with bnx2x_vf_finalize.
 */
81 bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
82 uint16_t type, uint16_t length)
84 struct bnx2x_vf_mbx_msg *mbox = sc->vf2pf_mbox;
/* Serialize mailbox users; lock is held across the whole request. */
86 rte_spinlock_lock(&sc->vf2pf_lock);
88 PMD_DRV_LOG(DEBUG, "Preparing %d tlv for sending", type);
90 memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
92 bnx2x_add_tlv(sc, &first_tlv->tl, 0, type, length);
94 /* Initialize header of the first tlv */
/* PF writes its reply right after the query area. */
95 first_tlv->reply_offset = sizeof(mbox->query);
98 /* releases the mailbox */
/* Ends a VF->PF exchange started by bnx2x_vf_prep: drops the vf2pf
 * spinlock so another request can use the shared mailbox.
 */
100 bnx2x_vf_finalize(struct bnx2x_softc *sc,
101 __rte_unused struct vf_first_tlv *first_tlv)
103 PMD_DRV_LOG(DEBUG, "done sending [%d] tlv over vf pf channel",
106 rte_spinlock_unlock(&sc->vf2pf_lock);
/* PXP doorbell addresses used to hand the PF the physical address of the
 * VF's request mailbox: low/high dwords of the address, then a one-byte
 * trigger register. Delay is in ms per try (see bnx2x_do_req4pf).
 * NOTE(review): the `+ 4` expansions are not parenthesized; safe at the
 * current call sites (plain macro arguments) but fragile if ever used in
 * a larger expression (CERT PRE01-C).
 */
109 #define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START
110 #define BNX2X_VF_CMD_ADDR_HI BNX2X_VF_CMD_ADDR_LO + 4
111 #define BNX2X_VF_CMD_TRIGGER BNX2X_VF_CMD_ADDR_HI + 4
112 #define BNX2X_VF_CHANNEL_DELAY 100
113 #define BNX2X_VF_CHANNEL_TRIES 100
/* Sends the prepared mailbox request to the PF: writes the mailbox DMA
 * address to the VF command registers, fires the trigger byte, then polls
 * the in-mailbox status field until the PF fills in a reply or the
 * retry budget (BNX2X_VF_CHANNEL_TRIES x BNX2X_VF_CHANNEL_DELAY ms) runs out.
 */
116 bnx2x_do_req4pf(struct bnx2x_softc *sc, phys_addr_t phys_addr)
/* status lives inside the shared reply area; PF sets it to non-zero. */
118 uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
122 bnx2x_check_bull(sc);
123 if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
124 PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
/* NOTE(review): reports SUCCESS while aborting -- presumably so callers
 * treat a downed channel as a benign no-op; confirm this is intended.
 */
125 *status = BNX2X_VF_STATUS_SUCCESS;
129 REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr));
130 REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr));
132 /* memory barrier to ensure that FW can read phys_addr */
135 REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1);
137 /* Do several attempts until PF completes
138 * "." is used to show progress
140 for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) {
141 DELAY_MS(BNX2X_VF_CHANNEL_DELAY);
147 PMD_DRV_LOG(ERR, "Response from PF timed out");
/* Status must be zero before triggering; a stale non-zero value means
 * the mailbox was not properly cleared by bnx2x_vf_prep.
 */
151 PMD_DRV_LOG(ERR, "status should be zero before message"
156 PMD_DRV_LOG(DEBUG, "Response from PF was received");
/* Returns ME_REG_VF_VALID when the doorbell "me" register value marks the
 * VF as valid and error-free, i.e. the PF has acked the VF.
 */
160 static inline uint16_t bnx2x_check_me_flags(uint32_t val)
162 if (((val) & ME_REG_VF_VALID) && (!((val) & ME_REG_VF_ERR)))
163 return ME_REG_VF_VALID;
/* Poll interval (ms) and retry count while waiting for a valid VF id. */
168 #define BNX2X_ME_ANSWER_DELAY 100
169 #define BNX2X_ME_ANSWER_TRIES 10
/* Polls the doorbell "me" register until it reports a valid, error-free
 * VF; presumably returns the VF id extracted from that register on
 * success (extraction lines not visible here) and an error on timeout.
 */
171 static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc)
176 while (i <= BNX2X_ME_ANSWER_TRIES) {
177 val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0));
178 if (bnx2x_check_me_flags(val))
181 DELAY_MS(BNX2X_ME_ANSWER_DELAY);
/* Acquire-negotiation limits: retry budget for the acquire handshake and
 * the number of MAC/multicast filters requested from the PF.
 */
188 #define BNX2X_VF_OBTAIN_MAX_TRIES 3
189 #define BNX2X_VF_OBTAIN_MAC_FILTERS 1
190 #define BNX2X_VF_OBTAIN_MC_FILTERS 10
/* Result of bnx2x_loop_obtain_resources: a success flag plus an errno-style
 * err_code (fields declared below; not all visible in this view).
 */
192 struct bnx2x_obtain_status {
/* Negotiates resources with the PF: sends the acquire request, and when
 * the PF answers NO_RESOURCES, shrinks each requested quantity down to
 * what the PF offered and retries, up to BNX2X_VF_OBTAIN_MAX_TRIES.
 * Returns a bnx2x_obtain_status describing success or the failure code.
 */
198 struct bnx2x_obtain_status bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
201 struct vf_acquire_resp_tlv *resp = &sc->vf2pf_mbox->resp.acquire_resp,
202 *sc_resp = &sc->acquire_resp;
203 struct vf_resource_query *res_query;
204 struct vf_resc *resc;
205 struct bnx2x_obtain_status status;
206 int res_obtained = false;
209 PMD_DRV_LOG(DEBUG, "trying to get resources");
/* Non-zero means the request itself could not be delivered. */
211 if (bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr)) {
214 status.err_code = -EAGAIN;
/* Snapshot the PF reply out of the shared mailbox before reuse. */
218 memcpy(sc_resp, resp, sizeof(sc->acquire_resp));
222 /* check PF to request acceptance */
223 if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) {
224 PMD_DRV_LOG(DEBUG, "resources obtained successfully");
226 } else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES &&
227 tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
229 "PF cannot allocate requested amount of resources");
231 res_query = &sc->vf2pf_mbox->query[0].acquire.res_query;
232 resc = &sc_resp->resc;
234 /* PF refused our request. Try to decrease request params */
/* Clamp each ask to the PF's counter-offer and retry. */
235 res_query->num_txqs = min(res_query->num_txqs, resc->num_txqs);
236 res_query->num_rxqs = min(res_query->num_rxqs, resc->num_rxqs);
237 res_query->num_sbs = min(res_query->num_sbs, resc->num_sbs);
238 res_query->num_mac_filters = min(res_query->num_mac_filters, resc->num_mac_filters);
239 res_query->num_vlan_filters = min(res_query->num_vlan_filters, resc->num_vlan_filters);
240 res_query->num_mc_filters = min(res_query->num_mc_filters, resc->num_mc_filters);
/* Clear the reply area so the next iteration reads fresh data. */
242 memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs));
244 PMD_DRV_LOG(ERR, "Resources cannot be obtained. Status of handling: %d. Aborting",
247 status.err_code = -EAGAIN;
250 } while (!res_obtained);
/* Full VF acquire flow: builds the ACQUIRE TLV chain (resource request,
 * physical-port-id request, list terminator), negotiates with the PF via
 * bnx2x_loop_obtain_resources(), then mirrors the PF's reply into the
 * softc (chip id, SB counts, queue limits, doorbell size, fw version,
 * MAC). rx_count/tx_count are the queue counts requested from the PF.
 */
256 int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count)
258 struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire;
260 struct bnx2x_obtain_status obtain_status;
/* Locks the mailbox; released by bnx2x_vf_finalize below. */
264 bnx2x_vf_prep(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq));
266 vf_id = bnx2x_read_vf_id(sc);
/* Initial ask -- may be reduced during negotiation. */
274 acq->res_query.num_rxqs = rx_count;
275 acq->res_query.num_txqs = tx_count;
276 acq->res_query.num_sbs = sc->igu_sb_cnt;
277 acq->res_query.num_mac_filters = BNX2X_VF_OBTAIN_MAC_FILTERS;
278 acq->res_query.num_mc_filters = BNX2X_VF_OBTAIN_MC_FILTERS;
/* Tell the PF where to DMA bulletin updates for this VF. */
280 acq->bulletin_addr = sc->pf2vf_bulletin_mapping.paddr;
282 /* Request physical port identifier */
283 bnx2x_add_tlv(sc, acq, acq->first_tlv.tl.length,
284 BNX2X_VF_TLV_PHYS_PORT_ID,
285 sizeof(struct channel_tlv));
287 bnx2x_add_tlv(sc, acq,
288 (acq->first_tlv.tl.length + sizeof(struct channel_tlv)),
289 BNX2X_VF_TLV_LIST_END,
290 sizeof(struct channel_list_end_tlv));
292 /* requesting the resources in loop */
293 obtain_status = bnx2x_loop_obtain_resources(sc);
294 if (!obtain_status.success) {
295 rc = obtain_status.err_code;
299 struct vf_acquire_resp_tlv sc_resp = sc->acquire_resp;
/* Populate device info from the PF reply; VFs have no MF mode, no
 * flash access, and always use the IGU interrupt block.
 */
301 sc->devinfo.chip_id |= (sc_resp.chip_num & 0xFFFF);
302 sc->devinfo.int_block = INT_BLOCK_IGU;
303 sc->devinfo.chip_port_mode = CHIP_2_PORT_MODE;
304 sc->devinfo.mf_info.mf_ov = 0;
305 sc->devinfo.mf_info.mf_mode = 0;
306 sc->devinfo.flash_size = 0;
308 sc->igu_sb_cnt = sc_resp.resc.num_sbs;
309 sc->igu_base_sb = sc_resp.resc.hw_sbs[0] & 0xFF;
311 sc->max_tx_queues = sc_resp.resc.num_txqs;
312 sc->max_rx_queues = sc_resp.resc.num_rxqs;
314 sc->link_params.chip_id = sc->devinfo.chip_id;
315 sc->doorbell_size = sc_resp.db_size;
/* VFs never handle WoL/iSCSI/FCoE offloads. */
316 sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG | BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG;
318 PMD_DRV_LOG(DEBUG, "status block count = %d, base status block = %x",
319 sc->igu_sb_cnt, sc->igu_base_sb);
/* NOTE(review): strncpy may leave fw_ver unterminated if the PF string
 * fills the buffer exactly -- consider snprintf/explicit NUL.
 */
320 strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver));
/* Use the PF-assigned MAC when valid, otherwise pick a random one. */
322 if (is_valid_assigned_ether_addr(&sc_resp.resc.current_mac_addr))
323 ether_addr_copy(&sc_resp.resc.current_mac_addr,
324 (struct ether_addr *)sc->link_params.mac_addr);
326 eth_random_addr(sc->link_params.mac_addr);
329 bnx2x_vf_finalize(sc, &acq->first_tlv);
334 /* Ask PF to release VF's resources */
/* Sends a RELEASE TLV carrying our VF id and logs (but does not propagate)
 * a PF failure; mailbox lock held from prep to finalize.
 */
336 bnx2x_vf_close(struct bnx2x_softc *sc)
338 struct vf_release_tlv *query;
339 struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
340 int vf_id = bnx2x_read_vf_id(sc);
343 query = &sc->vf2pf_mbox->query[0].release;
344 bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
347 query->vf_id = vf_id;
/* Terminate the TLV chain. */
348 bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
349 BNX2X_VF_TLV_LIST_END,
350 sizeof(struct channel_list_end_tlv));
352 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
353 if (reply->status != BNX2X_VF_STATUS_SUCCESS)
354 PMD_DRV_LOG(ERR, "Failed to release VF");
356 bnx2x_vf_finalize(sc, &query->first_tlv);
360 /* Let PF know the VF status blocks phys_addrs */
/* Sends the INIT TLV: per-queue status-block DMA addresses plus the
 * layout (base address and per-queue stride) of the VF's stats buffer,
 * so the PF's firmware can DMA statistics back to us.
 */
362 bnx2x_vf_init(struct bnx2x_softc *sc)
364 struct vf_init_tlv *query;
365 struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
368 query = &sc->vf2pf_mbox->query[0].init;
369 bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_INIT,
372 FOR_EACH_QUEUE(sc, i) {
373 query->sb_addr[i] = (unsigned long)(sc->fp[i].sb_dma.paddr);
/* Stats buffer: one per_queue_stats record per queue, starting at
 * queue_stats inside the fw stats data area.
 */
376 query->stats_step = sizeof(struct per_queue_stats);
377 query->stats_addr = sc->fw_stats_data_mapping +
378 offsetof(struct bnx2x_fw_stats_data, queue_stats);
380 bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
381 BNX2X_VF_TLV_LIST_END,
382 sizeof(struct channel_list_end_tlv));
384 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
385 if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
386 PMD_DRV_LOG(ERR, "Failed to init VF");
391 PMD_DRV_LOG(DEBUG, "VF was initialized");
393 bnx2x_vf_finalize(sc, &query->first_tlv);
/* Orderly VF teardown: tears down every queue (TEARDOWN_Q per queue),
 * removes our MAC filter, then sends CLOSE with our VF id. PF failures
 * are logged but teardown continues.
 */
398 bnx2x_vf_unload(struct bnx2x_softc *sc)
400 struct vf_close_tlv *query;
401 struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
402 struct vf_q_op_tlv *query_op;
405 vf_id = bnx2x_read_vf_id(sc);
/* One TEARDOWN_Q request per active queue; each is a full prep/req/
 * finalize cycle so the mailbox lock is not held across iterations.
 */
407 FOR_EACH_QUEUE(sc, i) {
408 query_op = &sc->vf2pf_mbox->query[0].q_op;
409 bnx2x_vf_prep(sc, &query_op->first_tlv,
410 BNX2X_VF_TLV_TEARDOWN_Q,
413 query_op->vf_qid = i;
415 bnx2x_add_tlv(sc, query_op,
416 query_op->first_tlv.tl.length,
417 BNX2X_VF_TLV_LIST_END,
418 sizeof(struct channel_list_end_tlv));
420 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
421 if (reply->status != BNX2X_VF_STATUS_SUCCESS)
423 "Bad reply for vf_q %d teardown", i);
425 bnx2x_vf_finalize(sc, &query_op->first_tlv);
/* Drop our MAC filter (set == false) before closing. */
428 bnx2x_vf_set_mac(sc, false);
430 query = &sc->vf2pf_mbox->query[0].close;
431 bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
434 query->vf_id = vf_id;
436 bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
437 BNX2X_VF_TLV_LIST_END,
438 sizeof(struct channel_list_end_tlv));
440 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
441 if (reply->status != BNX2X_VF_STATUS_SUCCESS)
443 "Bad reply from PF for close message");
445 bnx2x_vf_finalize(sc, &query->first_tlv);
/* Builds the common flag set for a VF queue setup request; the leading
 * queue additionally carries the LEADING_RSS flag.
 */
449 static inline uint16_t
450 bnx2x_vf_q_flags(uint8_t leading)
452 uint16_t flags = leading ? BNX2X_VF_Q_FLAG_LEADING_RSS : 0;
454 flags |= BNX2X_VF_Q_FLAG_CACHE_ALIGN;
455 flags |= BNX2X_VF_Q_FLAG_STATS;
456 flags |= BNX2X_VF_Q_FLAG_VLAN;
/* Fills rxq_init with the RX queue parameters (ring/completion-queue DMA
 * addresses, SB binding, MTU, buffer size) that the SETUP_Q TLV will
 * carry to the PF for fastpath fp.
 */
462 bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
463 struct vf_rxq_params *rxq_init, uint16_t flags)
465 struct bnx2x_rx_queue *rxq;
467 rxq = sc->rx_queues[fp->index];
469 PMD_DRV_LOG(ERR, "RX queue %d is NULL", fp->index);
/* Completion queue spans two pages: first page plus "next page" area. */
473 rxq_init->rcq_addr = rxq->cq_ring_phys_addr;
474 rxq_init->rcq_np_addr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE;
475 rxq_init->rxq_addr = rxq->rx_ring_phys_addr;
476 rxq_init->vf_sb_id = fp->index;
477 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
478 rxq_init->mtu = sc->mtu;
479 rxq_init->buf_sz = fp->rx_buf_size;
480 rxq_init->flags = flags;
/* -1: let the PF choose the stats counter id. */
481 rxq_init->stat_id = -1;
482 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
/* Fills txq_init with the TX queue parameters (ring DMA address, SB
 * binding, traffic type) for the SETUP_Q TLV for fastpath fp.
 */
486 bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
487 struct vf_txq_params *txq_init, uint16_t flags)
489 struct bnx2x_tx_queue *txq;
491 txq = sc->tx_queues[fp->index];
493 PMD_DRV_LOG(ERR, "TX queue %d is NULL", fp->index);
497 txq_init->txq_addr = txq->tx_ring_phys_addr;
498 txq_init->sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
499 txq_init->flags = flags;
/* Plain network traffic (no storage offload class). */
500 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
501 txq_init->vf_sb_id = fp->index;
/* Asks the PF to set up the RX/TX queue pair for fastpath fp via a
 * SETUP_Q TLV; `leading` marks the RSS leading queue (see
 * bnx2x_vf_q_flags).
 */
505 bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int leading)
507 struct vf_setup_q_tlv *query;
508 struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
509 uint16_t flags = bnx2x_vf_q_flags(leading);
512 query = &sc->vf2pf_mbox->query[0].setup_q;
513 bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q,
/* Both directions of the queue pair are configured in one request. */
516 query->vf_qid = fp->index;
517 query->param_valid = VF_RXQ_VALID | VF_TXQ_VALID;
519 bnx2x_vf_rx_q_prep(sc, fp, &query->rxq, flags);
520 bnx2x_vf_tx_q_prep(sc, fp, &query->txq, flags);
522 bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
523 BNX2X_VF_TLV_LIST_END,
524 sizeof(struct channel_list_end_tlv));
526 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
527 if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
528 PMD_DRV_LOG(ERR, "Failed to setup VF queue[%d]",
533 bnx2x_vf_finalize(sc, &query->first_tlv);
/* Installs (set != 0) or removes (set == 0) the VF's unicast MAC filter
 * via a SET_Q_FILTERS TLV. If the PF rejects the request because it has
 * assigned us a different MAC (published in the bulletin), adopt the
 * PF's MAC and retry until the bulletin stops changing.
 */
539 bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
541 struct vf_set_q_filters_tlv *query;
542 struct vf_common_reply_tlv *reply;
545 query = &sc->vf2pf_mbox->query[0].set_q_filters;
546 bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
/* Filter applies to the leading queue; one MAC entry. */
549 query->vf_qid = sc->fp->index;
550 query->mac_filters_cnt = 1;
551 query->flags = BNX2X_VF_MAC_VLAN_CHANGED;
553 query->filters[0].flags = (set ? BNX2X_VF_Q_FILTER_SET_MAC : 0) |
554 BNX2X_VF_Q_FILTER_DEST_MAC_VALID;
/* Refresh the bulletin first in case the PF changed our MAC. */
556 bnx2x_check_bull(sc);
558 rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);
560 bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
561 BNX2X_VF_TLV_LIST_END,
562 sizeof(struct channel_list_end_tlv));
564 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
565 reply = &sc->vf2pf_mbox->resp.common_reply;
/* Retry loop: PF failure + a bulletin update means the PF pushed a new
 * MAC for us; resend the filter request with that MAC.
 */
567 while (BNX2X_VF_STATUS_FAILURE == reply->status &&
568 bnx2x_check_bull(sc)) {
569 /* A new mac was configured by PF for us */
570 rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac,
572 rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
575 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
578 if (BNX2X_VF_STATUS_SUCCESS != reply->status) {
579 PMD_DRV_LOG(ERR, "Bad reply from PF for SET MAC message: %d",
584 bnx2x_vf_finalize(sc, &query->first_tlv);
/* Sends the RSS configuration (key, indirection table, result mask,
 * flags) from `params` to the PF via an UPDATE_RSS TLV.
 */
590 bnx2x_vf_config_rss(struct bnx2x_softc *sc,
591 struct ecore_config_rss_params *params)
593 struct vf_rss_tlv *query;
594 struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
597 query = &sc->vf2pf_mbox->query[0].update_rss;
599 bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS,
602 /* add list termination tlv */
603 bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
604 BNX2X_VF_TLV_LIST_END,
605 sizeof(struct channel_list_end_tlv));
607 rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
608 query->rss_key_size = T_ETH_RSS_KEY;
610 rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
611 query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
613 query->rss_result_mask = params->rss_result_mask;
614 query->rss_flags = params->rss_flags;
616 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
617 if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
618 PMD_DRV_LOG(ERR, "Failed to configure RSS");
622 bnx2x_vf_finalize(sc, &query->first_tlv);
/* Translates sc->rx_mode (none/normal/allmulti/promisc) into a VFPF
 * accept-mask and sends it to the PF via a SET_Q_FILTERS TLV with the
 * RX_MASK_CHANGED flag.
 */
628 bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
630 struct vf_set_q_filters_tlv *query;
631 struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
634 query = &sc->vf2pf_mbox->query[0].set_q_filters;
635 bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
639 query->flags = BNX2X_VF_RX_MASK_CHANGED;
641 switch (sc->rx_mode) {
642 case BNX2X_RX_MODE_NONE: /* no Rx */
643 query->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
/* Normal: matched unicast/multicast plus broadcast. */
645 case BNX2X_RX_MODE_NORMAL:
646 query->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
647 query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
648 query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
/* Allmulti: all multicast, matched unicast, broadcast. */
650 case BNX2X_RX_MODE_ALLMULTI:
651 query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
652 query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
653 query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
/* Promisc: everything. */
655 case BNX2X_RX_MODE_PROMISC:
656 query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
657 query->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
658 query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
661 PMD_DRV_LOG(ERR, "BAD rx mode (%d)", sc->rx_mode);
666 bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
667 BNX2X_VF_TLV_LIST_END,
668 sizeof(struct channel_list_end_tlv));
670 bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
671 if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
672 PMD_DRV_LOG(ERR, "Failed to set RX mode");
677 bnx2x_vf_finalize(sc, &query->first_tlv);