4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
51 #include "i40e_logs.h"
52 #include "base/i40e_prototype.h"
53 #include "base/i40e_adminq_cmd.h"
54 #include "base/i40e_type.h"
55 #include "i40e_ethdev.h"
56 #include "i40e_rxtx.h"
58 #include "rte_pmd_i40e.h"
60 #define I40E_CFG_CRCSTRIP_DEFAULT 1
63 i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
64 struct virtchnl_queue_select *qsel,
68 * Bind PF queues with VSI and VF.
/*
 * Program the HW queue-mapping registers for one VF: enable the
 * VSILAN/VPLAN queue tables, map each PF queue to the VF
 * (VPLAN_QTABLE) and to its VSI (VSILAN_QTABLE), then flush.
 * NOTE(review): listing is sampled — some original lines are elided here.
 */
71 i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
74 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
75 uint16_t vsi_id = vf->vsi->vsi_id;
76 uint16_t vf_id = vf->vf_idx;
77 uint16_t nb_qps = vf->vsi->nb_qps;
78 uint16_t qbase = vf->vsi->base_queue;
83 * VF should use scatter range queues. So, it needn't
84 * to set QBASE in this register.
86 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
87 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
89 /* Set to enable VFLAN_QTABLE[] registers valid */
90 I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
91 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
93 /* map PF queues to VF */
94 for (i = 0; i < nb_qps; i++) {
95 val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
96 I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
99 /* map PF queues to VSI */
/* Each VSILAN_QTABLE register packs two queue indices (q1 low, q2 high);
 * slots beyond nb_qps are filled with the QINDEX_0 mask sentinel. */
100 for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
101 if (2 * i > nb_qps - 1)
102 q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
106 if (2 * i + 1 > nb_qps - 1)
107 q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
109 q2 = qbase + 2 * i + 1;
111 val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
112 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
114 I40E_WRITE_FLUSH(hw);
121 * Proceed VF reset operation.
/*
 * Full VF reset sequence: announce VFR in progress to the VF, trigger a
 * SW reset (VFSWR) and poll VFRSTAT until done, tear down the VF's
 * queues/interrupts/VSI, wait for pending PCI transactions to drain,
 * then re-create the VSI, re-map queues and mark the VF active.
 * NOTE(review): listing is sampled — error paths and some branches are
 * elided here; do not assume behavior of the missing lines.
 */
124 i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
129 uint16_t vf_id, abs_vf_id, vf_msix_num;
131 struct virtchnl_queue_select qsel;
137 hw = I40E_PF_TO_HW(vf->pf);
139 abs_vf_id = vf_id + hw->func_caps.vf_base_id;
141 /* Notify VF that we are in VFR progress */
142 I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);
145 * If require a SW VF reset, a VFLR interrupt will be generated,
146 * this function will be called again. To avoid it,
147 * disable interrupt first.
150 vf->state = I40E_VF_INRESET;
151 val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
152 val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
153 I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
154 I40E_WRITE_FLUSH(hw);
156 #define VFRESET_MAX_WAIT_CNT 100
157 /* Wait until VF reset is done */
158 for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
160 val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
161 if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
165 if (i >= VFRESET_MAX_WAIT_CNT) {
166 PMD_DRV_LOG(ERR, "VF reset timeout");
169 vf->state = I40E_VF_ACTIVE;
171 /* This is not first time to do reset, do cleanup job first */
/* Build a queue-select covering every queue pair, then disable them all. */
174 memset(&qsel, 0, sizeof(qsel));
175 for (i = 0; i < vf->vsi->nb_qps; i++)
176 qsel.rx_queues |= 1 << i;
177 qsel.tx_queues = qsel.rx_queues;
178 ret = i40e_pf_host_switch_queues(vf, &qsel, false);
179 if (ret != I40E_SUCCESS) {
180 PMD_DRV_LOG(ERR, "Disable VF queues failed");
184 /* Disable VF interrupt setting */
185 vf_msix_num = hw->func_caps.num_msix_vectors_vf;
186 for (i = 0; i < vf_msix_num; i++) {
188 val = I40E_VFINT_DYN_CTL0(vf_id);
190 val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
192 I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
194 I40E_WRITE_FLUSH(hw);
197 ret = i40e_vsi_release(vf->vsi);
198 if (ret != I40E_SUCCESS) {
199 PMD_DRV_LOG(ERR, "Release VSI failed");
204 #define I40E_VF_PCI_ADDR 0xAA
205 #define I40E_VF_PEND_MASK 0x20
206 /* Check the pending transactions of this VF */
207 /* Use absolute VF id, refer to datasheet for details */
208 I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
209 (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
210 for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
212 val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
213 if ((val & I40E_VF_PEND_MASK) == 0)
217 if (i >= VFRESET_MAX_WAIT_CNT) {
218 PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
222 /* Reset done, Set COMPLETE flag and clear reset bit */
223 I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
224 val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
225 val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
226 I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
228 I40E_WRITE_FLUSH(hw);
230 /* Allocate resource again */
/* Re-create the SRIOV VSI; parent differs when a floating VEB is used. */
231 if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
232 vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
235 vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
236 vf->pf->main_vsi, vf->vf_idx);
239 if (vf->vsi == NULL) {
240 PMD_DRV_LOG(ERR, "Add vsi failed");
244 ret = i40e_pf_vf_queues_mapping(vf);
245 if (ret != I40E_SUCCESS) {
246 PMD_DRV_LOG(ERR, "queue mapping error");
247 i40e_vsi_release(vf->vsi);
251 I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);
/*
 * Send a virtchnl message/response to a VF over the AdminQ, using the
 * absolute VF id (vf_base_id + vf_idx).  Inactive VFs are skipped;
 * default return is I40E_ERR_ADMIN_QUEUE_ERROR until the send succeeds.
 */
257 i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
263 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
264 uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
265 int ret = I40E_ERR_ADMIN_QUEUE_ERROR;
267 if (vf->state == I40E_VF_INACTIVE)
270 ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
273 PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
274 hw->aq.asq_last_status);
/*
 * Handle VIRTCHNL_OP_VERSION: reply with a Linux-PF-compatible version
 * (major, minor NO_VF_CAPS) so both DPDK and Linux VF drivers work.
 */
281 i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
283 struct virtchnl_version_info info;
285 /* Respond like a Linux PF host in order to support both DPDK VF and
286 * Linux VF driver. The expense is original DPDK host specific feature
287 * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not available.
289 * DPDK VF also can't identify host driver by version number returned.
290 * It always assume talking with Linux PF.
292 info.major = VIRTCHNL_VERSION_MAJOR;
293 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
/* b_op selects a normal reply vs. a NOT_SUPPORTED reply (branch elided). */
296 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
301 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
/* Handle VIRTCHNL_OP_RESET_VF: run the reset with do_hw_reset=1. */
308 i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
310 i40e_pf_host_vf_reset(vf, 1);
312 /* No feedback will be sent to VF for VFLR */
/*
 * Handle VIRTCHNL_OP_GET_VF_RESOURCES: allocate and fill a
 * virtchnl_vf_resource describing one default VSI (queue count, MSI-X
 * vectors, offload flags, MAC) and send it back to the VF.
 */
317 i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
319 struct virtchnl_vf_resource *vf_res = NULL;
320 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
322 int ret = I40E_SUCCESS;
/* !b_op path: refuse the request with NOT_SUPPORTED. */
325 i40e_pf_host_send_msg_to_vf(vf,
326 VIRTCHNL_OP_GET_VF_RESOURCES,
327 I40E_NOT_SUPPORTED, NULL, 0);
331 /* only have 1 VSI by default */
332 len = sizeof(struct virtchnl_vf_resource) +
333 I40E_DEFAULT_VF_VSI_NUM *
334 sizeof(struct virtchnl_vsi_resource);
336 vf_res = rte_zmalloc("i40e_vf_res", len, 0);
337 if (vf_res == NULL) {
338 PMD_DRV_LOG(ERR, "failed to allocate mem");
339 ret = I40E_ERR_NO_MEMORY;
345 vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
346 VIRTCHNL_VF_OFFLOAD_VLAN;
347 vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
348 vf_res->num_queue_pairs = vf->vsi->nb_qps;
349 vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
351 /* Change below setting if PF host can support more VSIs for VF */
352 vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
353 vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
354 vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
355 ether_addr_copy(&vf->mac_addr,
356 (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
/* Reply carries the filled resource buffer (or error ret on failure). */
359 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
360 ret, (uint8_t *)vf_res, len);
/*
 * Translate a VF's virtchnl RX queue config into an HMC RX queue
 * context and program it for the absolute PF queue id
 * (vsi->base_queue + rxq->queue_id): clear the old context, then set
 * the new one.
 */
367 i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
368 struct i40e_pf_vf *vf,
369 struct virtchnl_rxq_info *rxq,
372 int err = I40E_SUCCESS;
373 struct i40e_hmc_obj_rxq rx_ctx;
374 uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;
376 /* Clear the context structure first */
377 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
378 rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
379 rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
380 rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
381 rx_ctx.qlen = rxq->ring_len;
382 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* Header split on/off selects the descriptor dtype accordingly. */
386 if (rxq->splithdr_enabled) {
387 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
388 rx_ctx.dtype = i40e_header_split_enabled;
390 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
391 rx_ctx.dtype = i40e_header_split_none;
393 rx_ctx.rxmax = rxq->max_pkt_size;
394 rx_ctx.tphrdesc_ena = 1;
395 rx_ctx.tphwdesc_ena = 1;
396 rx_ctx.tphdata_ena = 1;
397 rx_ctx.tphhead_ena = 1;
398 rx_ctx.lrxqthresh = 2;
399 rx_ctx.crcstrip = crcstrip;
403 err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
404 if (err != I40E_SUCCESS)
406 err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);
/*
 * Return the traffic class that owns @queue_id, by decoding each
 * enabled TC's queue offset (qp_idx) and size exponent (bsf) from the
 * VSI's tc_mapping.  Return value for the no-match case is elided in
 * this listing.
 */
411 static inline uint8_t
412 i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
415 struct i40e_aqc_vsi_properties_data *info = &vsi->info;
416 uint16_t bsf, qp_idx;
419 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
420 if (vsi->enabled_tc & (1 << i)) {
421 qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
422 I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
423 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
424 bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
425 I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
426 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
/* TC i spans [qp_idx, qp_idx + 2^bsf). */
427 if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
/*
 * Translate a VF's virtchnl TX queue config into an HMC TX queue
 * context, program it at the absolute queue id, then bind the queue to
 * the VF function via QTX_CTL and flush.
 */
435 i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
436 struct i40e_pf_vf *vf,
437 struct virtchnl_txq_info *txq)
439 int err = I40E_SUCCESS;
440 struct i40e_hmc_obj_txq tx_ctx;
441 struct i40e_vsi *vsi = vf->vsi;
443 uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
446 /* clear the context structure first */
447 memset(&tx_ctx, 0, sizeof(tx_ctx));
448 tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
449 tx_ctx.qlen = txq->ring_len;
/* rdylist comes from the qs_handle of the TC this queue belongs to. */
450 dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
451 tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
452 tx_ctx.head_wb_ena = txq->headwb_enabled;
453 tx_ctx.head_wb_addr = txq->dma_headwb_addr;
455 err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
456 if (err != I40E_SUCCESS)
459 err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
460 if (err != I40E_SUCCESS)
463 /* bind queue with VF function, since TX/QX will appear in pair,
464 * so only has QTX_CTL to set.
466 qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
467 ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
468 I40E_QTX_CTL_PF_INDX_MASK) |
469 (((vf->vf_idx + hw->func_caps.vf_base_id) <<
470 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
471 I40E_QTX_CTL_VFVM_INDX_MASK);
472 I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
473 I40E_WRITE_FLUSH(hw);
/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the message length and
 * queue counts, then program each RX/TX queue pair into the HMC (RX
 * uses the default CRC-strip setting).  Replies with ret.
 */
479 i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
484 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
485 struct i40e_vsi *vsi = vf->vsi;
486 struct virtchnl_vsi_queue_config_info *vc_vqci =
487 (struct virtchnl_vsi_queue_config_info *)msg;
488 struct virtchnl_queue_pair_info *vc_qpi;
489 int i, ret = I40E_SUCCESS;
/* !b_op path: refuse with NOT_SUPPORTED. */
492 i40e_pf_host_send_msg_to_vf(vf,
493 VIRTCHNL_OP_CONFIG_VSI_QUEUES,
494 I40E_NOT_SUPPORTED, NULL, 0);
498 if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
499 vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
500 msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
501 vc_vqci->num_queue_pairs)) {
502 PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
503 ret = I40E_ERR_PARAM;
507 vc_qpi = vc_vqci->qpair;
508 for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
509 if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
510 vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
511 ret = I40E_ERR_PARAM;
516 * Apply VF RX queue setting to HMC.
517 * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
518 * then the extra information of
519 * 'struct virtchnl_queue_pair_extra_info' is needed,
520 * otherwise set the last parameter to NULL.
522 if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
523 I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
524 PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
525 ret = I40E_ERR_PARAM;
529 /* Apply VF TX queue setting to HMC */
530 if (i40e_pf_host_hmc_config_txq(hw, vf,
531 &vc_qpi[i].txq) != I40E_SUCCESS) {
532 PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
533 ret = I40E_ERR_PARAM;
539 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT: same as the non-ext
 * handler, but the per-queue-pair extra info supplies the CRC-strip
 * setting for each RX queue instead of the default.
 */
546 i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
551 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
552 struct i40e_vsi *vsi = vf->vsi;
553 struct virtchnl_vsi_queue_config_ext_info *vc_vqcei =
554 (struct virtchnl_vsi_queue_config_ext_info *)msg;
555 struct virtchnl_queue_pair_ext_info *vc_qpei;
556 int i, ret = I40E_SUCCESS;
/* !b_op path: refuse with NOT_SUPPORTED. */
559 i40e_pf_host_send_msg_to_vf(
561 VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
562 I40E_NOT_SUPPORTED, NULL, 0);
566 if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
567 vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
568 msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
569 vc_vqcei->num_queue_pairs)) {
570 PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
571 ret = I40E_ERR_PARAM;
575 vc_qpei = vc_vqcei->qpair;
576 for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
577 if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
578 vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
579 ret = I40E_ERR_PARAM;
583 * Apply VF RX queue setting to HMC.
584 * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
585 * then the extra information of
586 * 'struct virtchnl_queue_pair_ext_info' is needed,
587 * otherwise set the last parameter to NULL.
589 if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
590 vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
591 PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
592 ret = I40E_ERR_PARAM;
596 /* Apply VF TX queue setting to HMC */
597 if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
599 PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
600 ret = I40E_ERR_PARAM;
606 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
/*
 * Build the HW interrupt "link list" for one VF vector: interleave the
 * vector's RX and TX queues (RX at even bit positions, TX at odd) into
 * linklistmap, then chain them through VPINT_LNKLST* and
 * QINT_RQCTL/QINT_TQCTL registers, terminating with index 0x7FF.
 * NOTE(review): listing is sampled — parts of the chain-walk loop are
 * elided here.
 */
614 struct virtchnl_vector_map *vvm)
616 #define BITS_PER_CHAR 8
617 uint64_t linklistmap = 0, tempmap;
618 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
620 bool b_first_q = true;
621 enum i40e_queue_type qtype;
623 uint32_t reg, reg_idx;
624 uint16_t itr_idx = 0, i;
626 vector_id = vvm->vector_id;
/* Vector 0 uses LNKLST0; others use LNKLSTN indexed per-VF. */
629 reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
631 reg_idx = I40E_VPINT_LNKLSTN(
632 ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
635 if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
636 I40E_WRITE_REG(hw, reg_idx,
637 I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
641 /* sort all rx and tx queues */
642 tempmap = vvm->rxq_map;
643 for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
645 linklistmap |= (1 << (2 * i));
649 tempmap = vvm->txq_map;
650 for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
652 linklistmap |= (1 << (2 * i + 1));
656 /* Link all rx and tx queues into a chained list */
657 tempmap = linklistmap;
/* Even bits -> RX, odd bits -> TX; qid is relative to the VSI base. */
662 qtype = (enum i40e_queue_type)(i % 2);
663 qid = vf->vsi->base_queue + i / 2;
668 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
671 /* element in the link list */
673 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
674 (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
675 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
676 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
678 I40E_WRITE_REG(hw, reg_idx, reg);
679 /* find next register to program */
681 case I40E_QUEUE_TYPE_RX:
682 reg_idx = I40E_QINT_RQCTL(qid);
683 itr_idx = vvm->rxitr_idx;
685 case I40E_QUEUE_TYPE_TX:
686 reg_idx = I40E_QINT_TQCTL(qid);
687 itr_idx = vvm->txitr_idx;
697 /* Terminate the link list */
699 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
700 (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
701 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
702 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
703 I40E_WRITE_REG(hw, reg_idx, reg);
706 I40E_WRITE_FLUSH(hw);
/*
 * Handle VIRTCHNL_OP_CONFIG_IRQ_MAP.  Distinguishes a DPDK VF (exactly
 * one vector: just record msix state and bind all queues) from a Linux
 * VF (per-vector maps: validate vector id and queue masks, then build
 * each vector's link list).  Replies with ret.
 */
710 i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
711 uint8_t *msg, uint16_t msglen,
714 int ret = I40E_SUCCESS;
715 struct i40e_pf *pf = vf->pf;
716 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
717 struct virtchnl_irq_map_info *irqmap =
718 (struct virtchnl_irq_map_info *)msg;
719 struct virtchnl_vector_map *map;
722 unsigned long qbit_max;
/* !b_op path: refuse with NOT_SUPPORTED. */
725 i40e_pf_host_send_msg_to_vf(
727 VIRTCHNL_OP_CONFIG_IRQ_MAP,
728 I40E_NOT_SUPPORTED, NULL, 0);
732 if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
733 PMD_DRV_LOG(ERR, "buffer too short");
734 ret = I40E_ERR_PARAM;
738 /* PF host will support both DPDK VF or Linux VF driver, identify by
739 * number of vectors requested.
742 /* DPDK VF only requires single vector */
743 if (irqmap->num_vectors == 1) {
744 /* This MSIX intr store the intr in VF range */
745 vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
746 vf->vsi->nb_msix = irqmap->num_vectors;
747 vf->vsi->nb_used_qps = vf->vsi->nb_qps;
749 /* Don't care how the TX/RX queue mapping with this vector.
750 * Link all VF RX queues together. Only did mapping work.
751 * VF can disable/enable the intr by itself.
753 i40e_vsi_queues_bind_intr(vf->vsi);
757 /* Then, it's Linux VF driver */
758 qbit_max = 1 << pf->vf_nb_qp_max;
759 for (i = 0; i < irqmap->num_vectors; i++) {
760 map = &irqmap->vecmap[i];
762 vector_id = map->vector_id;
763 /* validate msg params */
764 if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
765 ret = I40E_ERR_PARAM;
769 if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
770 i40e_pf_config_irq_link_list(vf, map);
772 /* configured queue size excceed limit */
773 ret = I40E_ERR_PARAM;
779 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
/*
 * Enable or disable the VF queues selected in @qsel.  Ordering rule:
 * RX queues are enabled first and disabled last, TX in between, so RX
 * is never live while TX is down.  Returns I40E_ERR_PARAM when no
 * queue is selected.
 */
786 i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
787 struct virtchnl_queue_select *qsel,
790 int ret = I40E_SUCCESS;
792 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
793 uint16_t baseq = vf->vsi->base_queue;
795 if (qsel->rx_queues + qsel->tx_queues == 0)
796 return I40E_ERR_PARAM;
798 /* always enable RX first and disable last */
799 /* Enable RX if it's enable */
801 for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
802 if (qsel->rx_queues & (1 << i)) {
803 ret = i40e_switch_rx_queue(hw, baseq + i, on);
804 if (ret != I40E_SUCCESS)
809 /* Enable/Disable TX */
810 for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
811 if (qsel->tx_queues & (1 << i)) {
812 ret = i40e_switch_tx_queue(hw, baseq + i, on);
813 if (ret != I40E_SUCCESS)
817 /* disable RX last if it's disable */
820 for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
821 if (qsel->rx_queues & (1 << i)) {
822 ret = i40e_switch_rx_queue(hw, baseq + i, on);
823 if (ret != I40E_SUCCESS)
/*
 * Handle VIRTCHNL_OP_ENABLE_QUEUES: validate the queue_select message
 * size, switch the selected queues on, and reply with the result.
 */
832 i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
836 int ret = I40E_SUCCESS;
837 struct virtchnl_queue_select *q_sel =
838 (struct virtchnl_queue_select *)msg;
840 if (msg == NULL || msglen != sizeof(*q_sel)) {
841 ret = I40E_ERR_PARAM;
844 ret = i40e_pf_host_switch_queues(vf, q_sel, true);
847 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
/*
 * Handle VIRTCHNL_OP_DISABLE_QUEUES: when permitted (b_op), validate
 * the message and switch the selected queues off; reply with ret.
 */
854 i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
859 int ret = I40E_SUCCESS;
860 struct virtchnl_queue_select *q_sel =
861 (struct virtchnl_queue_select *)msg;
/* !b_op path: refuse with NOT_SUPPORTED. */
864 i40e_pf_host_send_msg_to_vf(
866 VIRTCHNL_OP_DISABLE_QUEUES,
867 I40E_NOT_SUPPORTED, NULL, 0);
871 if (msg == NULL || msglen != sizeof(*q_sel)) {
872 ret = I40E_ERR_PARAM;
875 ret = i40e_pf_host_switch_queues(vf, q_sel, false);
878 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
/*
 * Handle VIRTCHNL_OP_ADD_ETH_ADDR: add each MAC in the list as a
 * perfect-match filter on the VF's VSI.  A zero MAC or an add failure
 * yields I40E_ERR_INVALID_MAC_ADDR.
 */
886 i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
891 int ret = I40E_SUCCESS;
892 struct virtchnl_ether_addr_list *addr_list =
893 (struct virtchnl_ether_addr_list *)msg;
894 struct i40e_mac_filter_info filter;
896 struct ether_addr *mac;
/* !b_op path: refuse with NOT_SUPPORTED. */
899 i40e_pf_host_send_msg_to_vf(
901 VIRTCHNL_OP_ADD_ETH_ADDR,
902 I40E_NOT_SUPPORTED, NULL, 0);
906 memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
908 if (msg == NULL || msglen <= sizeof(*addr_list)) {
909 PMD_DRV_LOG(ERR, "add_ether_address argument too short");
910 ret = I40E_ERR_PARAM;
914 for (i = 0; i < addr_list->num_elements; i++) {
915 mac = (struct ether_addr *)(addr_list->list[i].addr);
916 (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
917 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
918 if (is_zero_ether_addr(mac) ||
919 i40e_vsi_add_mac(vf->vsi, &filter)) {
920 ret = I40E_ERR_INVALID_MAC_ADDR;
926 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
/*
 * Handle VIRTCHNL_OP_DEL_ETH_ADDR: delete each listed MAC from the
 * VF's VSI; zero MACs or delete failures map to INVALID_MAC_ADDR.
 */
933 i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
938 int ret = I40E_SUCCESS;
939 struct virtchnl_ether_addr_list *addr_list =
940 (struct virtchnl_ether_addr_list *)msg;
942 struct ether_addr *mac;
/* !b_op path: refuse with NOT_SUPPORTED. */
945 i40e_pf_host_send_msg_to_vf(
947 VIRTCHNL_OP_DEL_ETH_ADDR,
948 I40E_NOT_SUPPORTED, NULL, 0);
952 if (msg == NULL || msglen <= sizeof(*addr_list)) {
953 PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
954 ret = I40E_ERR_PARAM;
958 for (i = 0; i < addr_list->num_elements; i++) {
959 mac = (struct ether_addr *)(addr_list->list[i].addr);
960 if(is_zero_ether_addr(mac) ||
961 i40e_vsi_delete_mac(vf->vsi, mac)) {
962 ret = I40E_ERR_INVALID_MAC_ADDR;
968 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
/*
 * Handle VIRTCHNL_OP_ADD_VLAN: add each VLAN id in the filter list to
 * the VF's VSI; stops on the first failure (loop exit elided).
 */
975 i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
976 uint8_t *msg, uint16_t msglen,
979 int ret = I40E_SUCCESS;
980 struct virtchnl_vlan_filter_list *vlan_filter_list =
981 (struct virtchnl_vlan_filter_list *)msg;
/* !b_op path: refuse with NOT_SUPPORTED. */
986 i40e_pf_host_send_msg_to_vf(
988 VIRTCHNL_OP_ADD_VLAN,
989 I40E_NOT_SUPPORTED, NULL, 0);
993 if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
994 PMD_DRV_LOG(ERR, "add_vlan argument too short");
995 ret = I40E_ERR_PARAM;
999 vid = vlan_filter_list->vlan_id;
1001 for (i = 0; i < vlan_filter_list->num_elements; i++) {
1002 ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
1003 if(ret != I40E_SUCCESS)
1008 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
/*
 * Handle VIRTCHNL_OP_DEL_VLAN: remove each VLAN id in the filter list
 * from the VF's VSI; stops on the first failure (loop exit elided).
 */
1015 i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
1020 int ret = I40E_SUCCESS;
1021 struct virtchnl_vlan_filter_list *vlan_filter_list =
1022 (struct virtchnl_vlan_filter_list *)msg;
/* !b_op path: refuse with NOT_SUPPORTED. */
1027 i40e_pf_host_send_msg_to_vf(
1029 VIRTCHNL_OP_DEL_VLAN,
1030 I40E_NOT_SUPPORTED, NULL, 0);
1034 if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
1035 PMD_DRV_LOG(ERR, "delete_vlan argument too short");
1036 ret = I40E_ERR_PARAM;
1040 vid = vlan_filter_list->vlan_id;
1041 for (i = 0; i < vlan_filter_list->num_elements; i++) {
1042 ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
1043 if(ret != I40E_SUCCESS)
1048 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
/*
 * Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: translate the VF's flag
 * bits into unicast/multicast promiscuous settings on its VSI via
 * AdminQ, then reply with the result.
 */
1055 i40e_pf_host_process_cmd_config_promisc_mode(
1056 struct i40e_pf_vf *vf,
1061 int ret = I40E_SUCCESS;
1062 struct virtchnl_promisc_info *promisc =
1063 (struct virtchnl_promisc_info *)msg;
1064 struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
1065 bool unicast = FALSE, multicast = FALSE;
/* !b_op path: refuse with NOT_SUPPORTED. */
1068 i40e_pf_host_send_msg_to_vf(
1070 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1071 I40E_NOT_SUPPORTED, NULL, 0);
1075 if (msg == NULL || msglen != sizeof(*promisc)) {
1076 ret = I40E_ERR_PARAM;
1080 if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
1082 ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
1083 vf->vsi->seid, unicast, NULL, true);
1084 if (ret != I40E_SUCCESS)
1087 if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
1089 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
1093 i40e_pf_host_send_msg_to_vf(vf,
1094 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
/*
 * Handle VIRTCHNL_OP_GET_STATS: refresh the VSI stats and send the
 * eth_stats block back to the VF; b_op selects the retval used
 * (branch condition elided).  Always returns I40E_SUCCESS.
 */
1100 i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
1102 i40e_update_vsi_stats(vf->vsi);
1105 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
1107 (uint8_t *)&vf->vsi->eth_stats,
1108 sizeof(vf->vsi->eth_stats));
1110 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
1112 (uint8_t *)&vf->vsi->eth_stats,
1113 sizeof(vf->vsi->eth_stats));
1115 return I40E_SUCCESS;
/*
 * Handle VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: turn VLAN stripping on for
 * the VF's VSI and reply with the result.
 */
1119 i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
1121 int ret = I40E_SUCCESS;
/* !b_op path: refuse with NOT_SUPPORTED. */
1124 i40e_pf_host_send_msg_to_vf(
1126 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
1127 I40E_NOT_SUPPORTED, NULL, 0);
1131 ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
1133 PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");
1135 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
/*
 * Handle VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: turn VLAN stripping off
 * for the VF's VSI and reply with the result.
 */
1142 i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
1144 int ret = I40E_SUCCESS;
/* !b_op path: refuse with NOT_SUPPORTED. */
1147 i40e_pf_host_send_msg_to_vf(
1149 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
1150 I40E_NOT_SUPPORTED, NULL, 0);
1154 ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
1156 PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");
1158 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
/*
 * Handle the DPDK-specific I40E_VIRTCHNL_OP_CFG_VLAN_PVID: apply the
 * requested PVID info to the VF's VSI and reply with the result.
 */
1165 i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
1170 int ret = I40E_SUCCESS;
1171 struct virtchnl_pvid_info *tpid_info =
1172 (struct virtchnl_pvid_info *)msg;
/* !b_op path: refuse with NOT_SUPPORTED. */
1175 i40e_pf_host_send_msg_to_vf(
1177 I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
1178 I40E_NOT_SUPPORTED, NULL, 0);
1182 if (msg == NULL || msglen != sizeof(*tpid_info)) {
1183 ret = I40E_ERR_PARAM;
1187 ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);
1190 i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
/*
 * Push a VIRTCHNL_EVENT_LINK_CHANGE event to one VF, translating the
 * PF's ETH_SPEED_NUM_* link speed into the VIRTCHNL_LINK_SPEED_* enum
 * (unknown speeds map to VIRTCHNL_LINK_SPEED_UNKNOWN).
 */
1197 i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
1199 struct virtchnl_pf_event event;
1201 event.event = VIRTCHNL_EVENT_LINK_CHANGE;
1202 event.event_data.link_event.link_status =
1203 dev->data->dev_link.link_status;
1205 /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
1206 switch (dev->data->dev_link.link_speed) {
1207 case ETH_SPEED_NUM_100M:
1208 event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
1210 case ETH_SPEED_NUM_1G:
1211 event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
1213 case ETH_SPEED_NUM_10G:
1214 event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
1216 case ETH_SPEED_NUM_20G:
1217 event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
1219 case ETH_SPEED_NUM_25G:
1220 event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
1222 case ETH_SPEED_NUM_40G:
1223 event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
1226 event.event_data.link_event.link_speed =
1227 VIRTCHNL_LINK_SPEED_UNKNOWN;
1231 i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
1232 I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
/*
 * Top-level VF mailbox dispatcher.  Converts the absolute VF id to the
 * internal index, validates the VF/VSI, asks the application (via the
 * RTE_ETH_EVENT_VF_MBOX callback) whether the operation may proceed,
 * then dispatches on the virtchnl opcode to the per-command handlers
 * above.  Unknown opcodes are NAK'd with I40E_ERR_PARAM.
 */
1236 i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
1237 uint16_t abs_vf_id, uint32_t opcode,
1238 __rte_unused uint32_t retval,
1242 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1243 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1244 struct i40e_pf_vf *vf;
1245 /* AdminQ will pass absolute VF id, transfer to internal vf id */
1246 uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
1247 struct rte_pmd_i40e_mb_event_param ret_param;
1250 if (vf_id > pf->vf_num - 1 || !pf->vfs) {
1251 PMD_DRV_LOG(ERR, "invalid argument");
1255 vf = &pf->vfs[vf_id];
1257 PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
1258 i40e_pf_host_send_msg_to_vf(vf, opcode,
1259 I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
1264 * initialise structure to send to user application
1265 * will return response from user in retval field
1267 ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
1268 ret_param.vfid = vf_id;
1269 ret_param.msg_type = opcode;
1270 ret_param.msg = (void *)msg;
1271 ret_param.msglen = msglen;
1274 * Ask user application if we're allowed to perform those functions.
1275 * If we get ret_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
1276 * then business as usual.
1277 * If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
1278 * do nothing and send not_supported to VF. As PF must send a response
1279 * to VF and ACK/NACK is not defined.
1281 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
1283 if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
1284 PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
/* b_op (set from the callback verdict; assignment elided) is threaded
 * through to each handler so it can NAK instead of acting. */
1290 case VIRTCHNL_OP_VERSION:
1291 PMD_DRV_LOG(INFO, "OP_VERSION received");
1292 i40e_pf_host_process_cmd_version(vf, b_op);
1294 case VIRTCHNL_OP_RESET_VF:
1295 PMD_DRV_LOG(INFO, "OP_RESET_VF received");
1296 i40e_pf_host_process_cmd_reset_vf(vf);
1298 case VIRTCHNL_OP_GET_VF_RESOURCES:
1299 PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
1300 i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
1302 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1303 PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
1304 i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
1307 case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
1308 PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
1309 i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
1312 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1313 PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
1314 i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
1316 case VIRTCHNL_OP_ENABLE_QUEUES:
1317 PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
1319 i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
1320 i40e_notify_vf_link_status(dev, vf);
1322 i40e_pf_host_send_msg_to_vf(
1323 vf, VIRTCHNL_OP_ENABLE_QUEUES,
1324 I40E_NOT_SUPPORTED, NULL, 0);
1327 case VIRTCHNL_OP_DISABLE_QUEUES:
1328 PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
1329 i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
1331 case VIRTCHNL_OP_ADD_ETH_ADDR:
1332 PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
1333 i40e_pf_host_process_cmd_add_ether_address(vf, msg,
1336 case VIRTCHNL_OP_DEL_ETH_ADDR:
1337 PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
1338 i40e_pf_host_process_cmd_del_ether_address(vf, msg,
1341 case VIRTCHNL_OP_ADD_VLAN:
1342 PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
1343 i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
1345 case VIRTCHNL_OP_DEL_VLAN:
1346 PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
1347 i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
1349 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1350 PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
1351 i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
1354 case VIRTCHNL_OP_GET_STATS:
1355 PMD_DRV_LOG(INFO, "OP_GET_STATS received");
1356 i40e_pf_host_process_cmd_get_stats(vf, b_op);
1358 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1359 PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
1360 i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
1362 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1363 PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
1364 i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
1366 case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
1367 PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
1368 i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
1370 /* Don't add command supported below, which will
1371 * return an error code.
1374 PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
1375 i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
/*
 * PF-host SRIOV init: no-op unless SRIOV is enabled with VFs and
 * queues configured.  Allocates the per-VF array, masks irq0 and VF
 * link-status interrupts while resetting every VF, records the active
 * VF count, then re-enables irq0 (also on the error path).
 */
1382 i40e_pf_host_init(struct rte_eth_dev *dev)
1384 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1385 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1389 PMD_INIT_FUNC_TRACE();
1392 * return if SRIOV not enabled, VF number not configured or
1393 * no queue assigned.
1395 if(!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
1396 return I40E_SUCCESS;
1398 /* Allocate memory to store VF structure */
1399 pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0);
1403 /* Disable irq0 for VFR event */
1404 i40e_pf_disable_irq0(hw);
1406 /* Disable VF link status interrupt */
1407 val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
1408 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
1409 I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
1410 I40E_WRITE_FLUSH(hw);
1412 for (i = 0; i < pf->vf_num; i++) {
1414 pf->vfs[i].state = I40E_VF_INACTIVE;
1415 pf->vfs[i].vf_idx = i;
1416 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
1417 if (ret != I40E_SUCCESS)
1421 RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
1423 i40e_pf_enable_irq0(hw);
1425 return I40E_SUCCESS;
/* error path: re-enable irq0 before returning the failure. */
1429 i40e_pf_enable_irq0(hw);
/*
 * PF-host SRIOV teardown: mirror of init — no-op unless SRIOV was
 * active; frees the VF array (free elided in this listing), masks irq0
 * and the VF link-status interrupt, and flushes.
 */
1435 i40e_pf_host_uninit(struct rte_eth_dev *dev)
1437 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1438 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1441 PMD_INIT_FUNC_TRACE();
1444 * return if SRIOV not enabled, VF number not configured or
1445 * no queue assigned.
1447 if ((!hw->func_caps.sr_iov_1_1) ||
1448 (pf->vf_num == 0) ||
1449 (pf->vf_nb_qps == 0))
1450 return I40E_SUCCESS;
1452 /* free memory to store VF structure */
1456 /* Disable irq0 for VFR event */
1457 i40e_pf_disable_irq0(hw);
1459 /* Disable VF link status interrupt */
1460 val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
1461 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
1462 I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
1463 I40E_WRITE_FLUSH(hw);
1465 return I40E_SUCCESS;