/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "rte_pmd_i40e.h"

#define I40E_CFG_CRCSTRIP_DEFAULT 1

static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct virtchnl_queue_select *qsel,
			   bool on);

/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase = vf->vsi->base_queue;
	uint16_t q1, q2;
	uint32_t val;

	/*
	 * The VF uses the scattered-queue mapping mode, so QBASE need not
	 * be set in this register; only enable the VSI queue table.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
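
	/*
	 * Two mapping tables are programmed below: VPLAN_QTABLE[] translates
	 * VF-relative queue indices into absolute PF queue numbers, while
	 * VSILAN_QTABLE[] packs two absolute indices per register for the
	 * VSI; unused slots keep the QINDEX_0 mask as an invalid marker.
	 * For example, with qbase = 64 and nb_qps = 4, entry 0 holds queues
	 * 64/65, entry 1 holds 66/67, and the remaining entries stay invalid.
	 */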
	/* Set the VPLAN_QTABLE[] registers as valid */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* map PF queues to VF */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/* map PF queues to VSI */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;
		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}

/**
 * Proceed with a VF reset operation.
 **/
static int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	pf = vf->pf;
	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify VF that we are in VFR progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be generated
	 * and this function will be called again. To avoid that, disable
	 * the interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}
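
	/*
	 * VFR handshake: VFGEN_RSTAT1 was set to VFR_INPROGRESS above; it is
	 * moved to VFR_COMPLETED once pending PCI transactions have drained,
	 * and to VFR_VFACTIVE after the VSI and queue mapping are rebuilt.
	 */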
#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout");
		return -ETIMEDOUT;
	}

	/* Not the first reset for this VF: do the cleanup job first */
	if (vf->vsi) {
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							(vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed");
			return -EFAULT;
		}
	}
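
	/*
	 * Before signalling completion, make sure no PCI transactions from
	 * this VF are still in flight: program the indirect access address
	 * register PF_PCI_CIAA with the VF's config space address and poll
	 * the pending bit returned in PF_PCI_CIAD.
	 */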
#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
	/* Check the pending transactions of this VF */
	/* Use absolute VF id, refer to datasheet for details */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		(abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
		return -ETIMEDOUT;
	}

	/* Reset done, set COMPLETE flag and clear the reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);

	I40E_WRITE_FLUSH(hw);

	/* Allocate resource again */
	if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 NULL, vf->vf_idx);
	} else {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 vf->pf->main_vsi, vf->vf_idx);
	}

	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	/* Reset done, the VF is active again */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);

	return ret;
}

static int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
			    uint32_t opcode,
			    uint32_t retval,
			    uint8_t *msg,
			    uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
	int ret;

	ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
				     msg, msglen, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
			     hw->aq.asq_last_status);
	}

	return ret;
}

static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_version_info info;

	/* Respond like a Linux PF host in order to support both DPDK VF and
	 * Linux VF drivers. The cost is that DPDK-host-specific features,
	 * such as CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT, will not be
	 * available.
	 *
	 * A DPDK VF also cannot identify the host driver by the version
	 * number returned; it always assumes it is talking to a Linux PF.
	 */
	info.major = VIRTCHNL_VERSION_MAJOR;
	info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
					    I40E_SUCCESS,
					    (uint8_t *)&info,
					    sizeof(info));
	else
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&info,
					    sizeof(info));
}

static int
i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
{
	i40e_pf_host_vf_reset(vf, 1);

	/* No feedback will be sent to the VF for a VFLR */
	return I40E_SUCCESS;
}

static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    VIRTCHNL_OP_GET_VF_RESOURCES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* only have 1 VSI by default */
	len = sizeof(struct virtchnl_vf_resource) +
				I40E_DEFAULT_VF_VSI_NUM *
		sizeof(struct virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem");
		ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
				VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change the settings below if the PF host can support more VSIs per VF */
	vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
	ether_addr_copy(&vf->mac_addr,
		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				    ret, (uint8_t *)vf_res, len);
	rte_free(vf_res);

	return ret;
}

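/*
 * The two helpers below program the RX/TX queue contexts in the HMC on
 * behalf of the VF: the VF supplies ring addresses and sizes through
 * virtchnl, and the PF applies them against absolute PF queue ids.
 */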
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct virtchnl_rxq_info *rxq,
			    uint8_t crcstrip)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_rxq rx_ctx;
	uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

	/* Clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif

	if (rxq->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
		rx_ctx.dtype = i40e_header_split_enabled;
	} else {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
		rx_ctx.dtype = i40e_header_split_none;
	}
	rx_ctx.rxmax = rxq->max_pkt_size;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = crcstrip;

	err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;
	err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

	return err;
}

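/*
 * Look up the traffic class a VSI queue belongs to: each enabled TC's
 * tc_mapping encodes the offset of its first queue and the log2 of its
 * queue count (bsf), so the TC owning queue_id is the one whose range
 * [qp_idx, qp_idx + 2^bsf) contains it.
 */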
static inline uint8_t
i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
			 uint16_t queue_id)
{
	struct i40e_aqc_vsi_properties_data *info = &vsi->info;
	uint16_t bsf, qp_idx;
	uint8_t i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & (1 << i)) {
			qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
				I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
			bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
				I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
			if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
				return i;
		}
	}

	return 0;
}

static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi = vf->vsi;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
	uint8_t dcb_tc;

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
	tx_ctx.head_wb_ena = txq->headwb_enabled;
	tx_ctx.head_wb_addr = txq->dma_headwb_addr;

	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/* Bind the queue with the VF function. Since TX/RX queues appear
	 * in pairs, only QTX_CTL needs to be set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
			((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
			I40E_QTX_CTL_PF_INDX_MASK) |
			(((vf->vf_idx + hw->func_caps.vf_base_id) <<
			I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}

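/*
 * Handler for VIRTCHNL_OP_CONFIG_VSI_QUEUES: walk each queue pair sent
 * by the VF, bounds-check the queue ids against the VSI, and apply the
 * RX and TX contexts via the two HMC helpers above.
 */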
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct virtchnl_vsi_queue_config_info *vc_vqci =
		(struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *vc_qpi;
	int i, ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    VIRTCHNL_OP_CONFIG_VSI_QUEUES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
		vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
		msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
					vc_vqci->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpi = vc_vqci->qpair;
	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
			vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF RX queue setting to HMC */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
			I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf,
			&vc_qpi[i].txq) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				    ret, NULL, 0);

	return ret;
}

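/*
 * The hardware chains all queues of one interrupt vector into a linked
 * list: the head index lives in VPINT_LNKLST0/LNKLSTN and each
 * QINT_RQCTL/TQCTL entry points at the next queue; index 0x7FF
 * terminates the list.
 */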
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
			     struct virtchnl_vector_map *vvm)
{
#define BITS_PER_CHAR 8
	uint64_t linklistmap = 0, tempmap;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t qid;
	bool b_first_q = true;
	enum i40e_queue_type qtype;
	uint16_t vector_id;
	uint32_t reg, reg_idx;
	uint16_t itr_idx = 0, i;

	vector_id = vvm->vector_id;
	/* setup the head */
	if (!vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
		+ (vector_id - 1));

	if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
		I40E_WRITE_REG(hw, reg_idx,
			       I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto cfg_irq_done;
	}

	/* sort all rx and tx queues */
	tempmap = vvm->rxq_map;
	for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
		if (tempmap & 0x1)
			linklistmap |= (1ULL << (2 * i));
		tempmap >>= 1;
	}

	tempmap = vvm->txq_map;
	for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
		if (tempmap & 0x1)
			linklistmap |= (1ULL << (2 * i + 1));
		tempmap >>= 1;
	}

	/* Link all rx and tx queues into a chained list */
	tempmap = linklistmap;
	i = 0;
	b_first_q = true;
	do {
		if (tempmap & 0x1) {
			qtype = (enum i40e_queue_type)(i % 2);
			qid = vf->vsi->base_queue + i / 2;
			if (b_first_q) {
				/* This is the header */
				b_first_q = false;
				reg = ((qtype <<
					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
					| qid);
			} else {
				/* element in the link list */
				reg = (vector_id) |
					(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
					(qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
					BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
					(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
			}
			I40E_WRITE_REG(hw, reg_idx, reg);
			/* find the next register to program */
			switch (qtype) {
			case I40E_QUEUE_TYPE_RX:
				reg_idx = I40E_QINT_RQCTL(qid);
				itr_idx = vvm->rxitr_idx;
				break;
			case I40E_QUEUE_TYPE_TX:
				reg_idx = I40E_QINT_TQCTL(qid);
				itr_idx = vvm->txitr_idx;
				break;
			default:
				break;
			}
		}
		tempmap >>= 1;
		i++;
	} while (tempmap);

	/* Terminate the link list */
	reg = (vector_id) |
		(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		(0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
	I40E_WRITE_REG(hw, reg_idx, reg);

cfg_irq_done:
	I40E_WRITE_FLUSH(hw);
}

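/*
 * Handler for VIRTCHNL_OP_CONFIG_IRQ_MAP: a DPDK VF requests a single
 * vector and lets the PF bind all of its RX queues to it, while a Linux
 * VF supplies a full vector map that is validated against vf_nb_qp_max.
 */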
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
					uint8_t *msg, uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct virtchnl_irq_map_info *irqmap =
		(struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	int i;
	uint16_t vector_id, itr_idx;
	unsigned long qbit_max;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_CONFIG_IRQ_MAP,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
		PMD_DRV_LOG(ERR, "buffer too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/* The PF host supports both DPDK VF and Linux VF drivers,
	 * identified by the number of vectors requested.
	 */

	/* A DPDK VF only requires a single vector */
	if (irqmap->num_vectors == 1) {
		/* This MSIX interrupt is stored in the VF range */
		vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
		vf->vsi->nb_msix = irqmap->num_vectors;
		vf->vsi->nb_used_qps = vf->vsi->nb_qps;
		itr_idx = irqmap->vecmap[0].rxitr_idx;

		/* Don't care how the TX/RX queues map to this vector;
		 * link all VF RX queues together. Only the mapping work
		 * is done here; the VF can enable/disable the interrupt
		 * by itself.
		 */
		i40e_vsi_queues_bind_intr(vf->vsi, itr_idx);
		goto send_msg;
	}

	/* Otherwise, it's a Linux VF driver */
	qbit_max = 1 << pf->vf_nb_qp_max;
	for (i = 0; i < irqmap->num_vectors; i++) {
		map = &irqmap->vecmap[i];

		vector_id = map->vector_id;
		/* validate msg params */
		if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
			i40e_pf_config_irq_link_list(vf, map);
		} else {
			/* configured queue size exceeds the limit */
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct virtchnl_queue_select *qsel,
			   bool on)
{
	int ret = I40E_SUCCESS;
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t baseq = vf->vsi->base_queue;

	if (qsel->rx_queues + qsel->tx_queues == 0)
		return I40E_ERR_PARAM;

	/* always enable RX first and disable it last */
	/* enable RX first when enabling */
	if (on) {
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	/* Enable/Disable TX */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
		if (qsel->tx_queues & (1 << i)) {
			ret = i40e_switch_tx_queue(hw, baseq + i, on);
			if (ret != I40E_SUCCESS)
				return ret;
		}

	/* disable RX last when disabling */
	if (!on) {
		/* disable RX */
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	return ret;
}

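/*
 * The two virtchnl handlers below wrap i40e_pf_host_switch_queues();
 * both require msglen to match struct virtchnl_queue_select exactly.
 */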
static int
i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
				       uint8_t *msg,
				       uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_queue_select *q_sel =
		(struct virtchnl_queue_select *)msg;

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, true);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_queue_select *q_sel =
		(struct virtchnl_queue_select *)msg;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DISABLE_QUEUES,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, false);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list =
		(struct virtchnl_ether_addr_list *)msg;
	struct i40e_mac_filter_info filter;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ADD_ETH_ADDR,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	memset(&filter, 0, sizeof(struct i40e_mac_filter_info));

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "add_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		if (is_zero_ether_addr(mac) ||
		    i40e_vsi_add_mac(vf->vsi, &filter)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list =
		(struct virtchnl_ether_addr_list *)msg;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DEL_ETH_ADDR,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		if (is_zero_ether_addr(mac) ||
		    i40e_vsi_delete_mac(vf->vsi, mac)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg, uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_vlan_filter_list *vlan_filter_list =
		(struct virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ADD_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "add_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;

	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg,
				  uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_vlan_filter_list *vlan_filter_list =
		(struct virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DEL_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "delete_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;
	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_config_promisc_mode(
					struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_promisc_info *promisc =
		(struct virtchnl_promisc_info *)msg;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	bool unicast = FALSE, multicast = FALSE;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*promisc)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
		unicast = TRUE;
	ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
			vf->vsi->seid, unicast, NULL, true);
	if (ret != I40E_SUCCESS)
		goto send_msg;

	if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
		multicast = TRUE;
	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
						    multicast, NULL);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf,
		VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);

	return ret;
}

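/*
 * Handler for VIRTCHNL_OP_GET_STATS: refresh the VSI statistics and
 * return eth_stats to the VF. When b_op is false the buffer is still
 * returned, only with I40E_NOT_SUPPORTED as the return value.
 */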
static int
i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
	i40e_update_vsi_stats(vf->vsi);

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
					    I40E_SUCCESS,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));
	else
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));

	return I40E_SUCCESS;
}

static int
i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");

	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				    ret, NULL, 0);

	return ret;
}

static int
i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");

	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				    ret, NULL, 0);

	return ret;
}

static void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct virtchnl_pf_event event;
	uint16_t vf_id = vf->vf_idx;
	uint32_t tval, rval;

	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.event_data.link_event.link_status =
		dev->data->dev_link.link_status;

	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
	switch (dev->data->dev_link.link_speed) {
	case ETH_SPEED_NUM_100M:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
		break;
	case ETH_SPEED_NUM_1G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
		break;
	case ETH_SPEED_NUM_10G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
		break;
	case ETH_SPEED_NUM_20G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
		break;
	case ETH_SPEED_NUM_25G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
		break;
	case ETH_SPEED_NUM_40G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	default:
		event.event_data.link_event.link_speed =
			VIRTCHNL_LINK_SPEED_UNKNOWN;
		break;
	}
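
	/*
	 * Only deliver the event if the VF's admin queues are initialized
	 * (non-zero ATQLEN/ARQLEN), i.e. the VF driver appears to be up;
	 * otherwise the message would be lost.
	 */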
	tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
	rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));

	if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
	    tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
	    rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
	    rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
			I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}

void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
			   uint16_t abs_vf_id, uint32_t opcode,
			   __rte_unused uint32_t retval,
			   uint8_t *msg,
			   uint16_t msglen)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf_vf *vf;
	/* AdminQ passes the absolute VF id; convert it to the internal vf id */
	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
	struct rte_pmd_i40e_mb_event_param ret_param;
	bool b_op = TRUE;

	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "invalid argument");
		return;
	}

	vf = &pf->vfs[vf_id];
	if (!vf->vsi) {
		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
		i40e_pf_host_send_msg_to_vf(vf, opcode,
			I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
		return;
	}

	/**
	 * initialise the structure to send to the user application;
	 * the user's response is returned in the retval field
	 */
	ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
	ret_param.vfid = vf_id;
	ret_param.msg_type = opcode;
	ret_param.msg = (void *)msg;
	ret_param.msglen = msglen;

	/**
	 * Ask the user application whether we're allowed to perform these
	 * functions. If ret_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
	 * it's business as usual.
	 * On RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
	 * do nothing and send not_supported to the VF, since the PF must send
	 * a response and ACK/NACK is not defined.
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
				      &ret_param);
	if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
		PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
			    opcode);
		b_op = FALSE;
	}

	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		PMD_DRV_LOG(INFO, "OP_VERSION received");
		i40e_pf_host_process_cmd_version(vf, b_op);
		break;
	case VIRTCHNL_OP_RESET_VF:
		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
		i40e_pf_host_process_cmd_reset_vf(vf);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
		i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
		i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
		if (b_op) {
			i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
			i40e_notify_vf_link_status(dev, vf);
		} else {
			i40e_pf_host_send_msg_to_vf(
				vf, VIRTCHNL_OP_ENABLE_QUEUES,
				I40E_NOT_SUPPORTED, NULL, 0);
		}
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUES received");
		i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_add_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_del_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
							     msglen, b_op);
		break;
	case VIRTCHNL_OP_GET_STATS:
		PMD_DRV_LOG(INFO, "OP_GET_STATS received");
		i40e_pf_host_process_cmd_get_stats(vf, b_op);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
		i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
		i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
		break;
	/* Any opcode not handled above is unsupported and is
	 * answered with an error code.
	 */
	default:
		PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
		i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
					    NULL, 0);
		break;
	}
}

int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * return if SRIOV is not enabled, the VF number is not configured,
	 * or no queue is assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store the VF structures */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
	}

	RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	i40e_pf_enable_irq0(hw);

	return ret;
}

int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * return if SRIOV is not enabled, the VF number is not configured,
	 * or no queue is assigned.
	 */
	if ((!hw->func_caps.sr_iov_1_1) ||
	    (pf->vf_num == 0) ||
	    (pf->vf_nb_qps == 0))
		return I40E_SUCCESS;

	/* free the memory storing the VF structures */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}