/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "rte_pmd_i40e.h"
#define I40E_CFG_CRCSTRIP_DEFAULT 1
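/*
 * Forward declaration: the VF reset path below needs this helper before
 * its definition appears later in the file.
 */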
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct virtchnl_queue_select *qsel,
			   bool on);
/*
 * Bind PF queues with VSI and VF.
 */
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	uint32_t val;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase = vf->vsi->base_queue;
	uint16_t q1, q2, i;

	/*
	 * VF should use scatter range queues, so there is no need
	 * to set QBASE in this register.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Set to enable VFLAN_QTABLE[] registers valid */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		       I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* map PF queues to VF */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/*
	 * map PF queues to VSI; each VSILAN_QTABLE register carries two
	 * queue indexes, and unused slots are marked with the index mask.
	 */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;

		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
/*
 * Perform the VF reset operation.
 */
static int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	pf = vf->pf;
	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify VF that we are in VFR progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be generated
	 * and this function will be called again. To avoid that, disable
	 * the interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout");
		return -ETIMEDOUT;
	}

	/* If this is not the first reset, do the cleanup job first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (i == 0)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							(vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove the VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR 0xAA
#define I40E_VF_PEND_MASK 0x20
	/*
	 * Check the pending transactions of this VF.
	 * Use the absolute VF id; refer to the datasheet for details.
	 */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		       (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
		return -EFAULT;
	}

	/* Reset done; set the COMPLETED flag and clear the reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	I40E_WRITE_FLUSH(hw);

	/* Allocate resources again */
	if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 NULL, vf->vf_idx);
	} else {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 vf->pf->main_vsi, vf->vf_idx);
	}

	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);

	return ret;
}
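/*
 * Send a reply (opcode, return value and optional payload) to a VF over
 * the PF-to-VF admin queue.
 */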
static int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
			    uint32_t opcode,
			    uint32_t retval,
			    uint8_t *msg,
			    uint16_t msglen)
{
	int ret;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;

	ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
				     msg, msglen, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
			     hw->aq.asq_last_status);
	}

	return ret;
}
static int
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_version_info info;

	/*
	 * Respond like a Linux PF host in order to support both DPDK VF and
	 * Linux VF drivers. The cost is that DPDK-host-specific features,
	 * such as CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT, are not
	 * available.
	 *
	 * Also, the DPDK VF cannot identify the host driver by the version
	 * number returned; it always assumes it is talking to a Linux PF.
	 */
	info.major = VIRTCHNL_VERSION_MAJOR;
	info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
					    I40E_SUCCESS,
					    (uint8_t *)&info,
					    sizeof(info));
	else
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&info,
					    sizeof(info));

	return I40E_SUCCESS;
}
static int
i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
{
	i40e_pf_host_vf_reset(vf, 1);

	/* No feedback will be sent to VF for VFLR */
	return I40E_SUCCESS;
}
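/*
 * VIRTCHNL_OP_GET_VF_RESOURCES handler: report the VF's single VSI, its
 * queue pairs, MSI-X vector count and default MAC address.
 */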
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    VIRTCHNL_OP_GET_VF_RESOURCES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* only have 1 VSI by default */
	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_DEFAULT_VF_VSI_NUM *
	      sizeof(struct virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem");
		ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
				   VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change below setting if PF host can support more VSIs for VF */
	vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
	ether_addr_copy(&vf->mac_addr,
		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				    ret, (uint8_t *)vf_res, len);
	rte_free(vf_res);

	return ret;
}
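/*
 * Program the HMC RX queue context for one VF queue; 'crcstrip' selects
 * whether the hardware strips the Ethernet CRC on receive.
 */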
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct virtchnl_rxq_info *rxq,
			    uint8_t crcstrip)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_rxq rx_ctx;
	uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

	/* Clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif

	if (rxq->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
		rx_ctx.dtype = i40e_header_split_enabled;
	} else {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
		rx_ctx.dtype = i40e_header_split_none;
	}
	rx_ctx.rxmax = rxq->max_pkt_size;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = crcstrip;

	err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;
	err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

	return err;
}
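/*
 * Map a VSI-relative queue id to its traffic class by walking the
 * TC-to-queue mapping from the VSI properties; falls back to TC 0.
 */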
static inline uint8_t
i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
			 uint16_t queue_id)
{
	struct i40e_aqc_vsi_properties_data *info = &vsi->info;
	uint16_t bsf, qp_idx;
	uint8_t i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & (1 << i)) {
			qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
				I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
			bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
				I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
			if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
				return i;
		}
	}

	return 0;
}
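/*
 * Program the HMC TX queue context for one VF queue and bind the queue
 * to the VF function through QTX_CTL.
 */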
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi = vf->vsi;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
	uint8_t dcb_tc;

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
	tx_ctx.head_wb_ena = txq->headwb_enabled;
	tx_ctx.head_wb_addr = txq->dma_headwb_addr;

	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/*
	 * Bind the queue with the VF function. TX and RX queues appear
	 * in pairs, so only QTX_CTL needs to be set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
		  ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		   I40E_QTX_CTL_PF_INDX_MASK) |
		  (((vf->vf_idx + hw->func_caps.vf_base_id) <<
		    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
		   I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
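/*
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES handler: validate the request, then apply
 * each queue pair's RX and TX settings to the HMC.
 */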
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct virtchnl_vsi_queue_config_info *vc_vqci =
		(struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *vc_qpi;
	int i, ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    VIRTCHNL_OP_CONFIG_VSI_QUEUES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
	    vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
	    msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
					vc_vqci->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpi = vc_vqci->qpair;
	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/*
		 * Apply VF RX queue setting to HMC.
		 * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information of
		 * 'struct virtchnl_queue_pair_extra_info' is needed,
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
			I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf,
				&vc_qpi[i].txq) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				    ret, NULL, 0);

	return ret;
}
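/*
 * Chain one vector's RX/TX queues into the hardware interrupt link list:
 * VPINT_LNKLST{0,N} points at the first queue, each queue's
 * QINT_{R,T}QCTL register points at the next one, and a next-queue index
 * of 0x7FF terminates the list.
 */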
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
			     struct virtchnl_vector_map *vvm)
{
#define BITS_PER_CHAR 8
	uint64_t linklistmap = 0, tempmap;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t qid;
	bool b_first_q = true;
	enum i40e_queue_type qtype;
	uint16_t vector_id;
	uint32_t reg, reg_idx;
	uint16_t itr_idx = 0, i;

	vector_id = vvm->vector_id;
	/* setup the head */
	if (!vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
			+ (vector_id - 1));

	if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
		I40E_WRITE_REG(hw, reg_idx,
			       I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto cfg_irq_done;
	}

	/* sort all rx and tx queues */
	tempmap = vvm->rxq_map;
	for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
		if (tempmap & 0x1)
			/* shift 1ULL, not 1: a plain int shift overflows
			 * for the high bit positions of the 64-bit map
			 */
			linklistmap |= (1ULL << (2 * i));
		tempmap >>= 1;
	}

	tempmap = vvm->txq_map;
	for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
		if (tempmap & 0x1)
			linklistmap |= (1ULL << (2 * i + 1));
		tempmap >>= 1;
	}

	/* Link all rx and tx queues into a chained list */
	tempmap = linklistmap;
	i = 0;
	b_first_q = true;
	do {
		if (tempmap & 0x1) {
			qtype = (enum i40e_queue_type)(i % 2);
			qid = vf->vsi->base_queue + i / 2;
			if (b_first_q) {
				/* This is the header */
				b_first_q = false;
				reg = ((qtype <<
					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
				       | qid);
			} else {
				/* element in the link list */
				reg = (vector_id) |
				      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
				      (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
				      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
				      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
			}
			I40E_WRITE_REG(hw, reg_idx, reg);
			/* find next register to program */
			switch (qtype) {
			case I40E_QUEUE_TYPE_RX:
				reg_idx = I40E_QINT_RQCTL(qid);
				itr_idx = vvm->rxitr_idx;
				break;
			case I40E_QUEUE_TYPE_TX:
				reg_idx = I40E_QINT_TQCTL(qid);
				itr_idx = vvm->txitr_idx;
				break;
			default:
				break;
			}
		}
		i++;
		tempmap = tempmap >> 1;
	} while (i < sizeof(linklistmap) * BITS_PER_CHAR);

	/* Terminate the link list */
	reg = (vector_id) |
	      (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	      (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
	      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
	I40E_WRITE_REG(hw, reg_idx, reg);

cfg_irq_done:
	I40E_WRITE_FLUSH(hw);
}
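/*
 * VIRTCHNL_OP_CONFIG_IRQ_MAP handler: a single requested vector means a
 * DPDK VF (bind all queues to that one vector); otherwise treat the
 * sender as a Linux VF and program one interrupt link list per vector.
 */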
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
					uint8_t *msg, uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct virtchnl_irq_map_info *irqmap =
		(struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	int i;
	uint16_t vector_id, itr_idx;
	unsigned long qbit_max;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_CONFIG_IRQ_MAP,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
		PMD_DRV_LOG(ERR, "buffer too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/*
	 * The PF host supports both the DPDK VF and the Linux VF driver;
	 * identify which one by the number of vectors requested.
	 */

	/* The DPDK VF only requires a single vector */
	if (irqmap->num_vectors == 1) {
		/* This MSI-X interrupt is stored in the VF range */
		vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
		vf->vsi->nb_msix = irqmap->num_vectors;
		vf->vsi->nb_used_qps = vf->vsi->nb_qps;
		itr_idx = irqmap->vecmap[0].rxitr_idx;

		/*
		 * Don't care how the TX/RX queues are mapped to this vector;
		 * link all VF RX queues together and do only the mapping
		 * work. The VF can enable/disable the interrupt by itself.
		 */
		i40e_vsi_queues_bind_intr(vf->vsi, itr_idx);
		goto send_msg;
	}

	/* Otherwise, it's the Linux VF driver */
	qbit_max = 1 << pf->vf_nb_qp_max;
	for (i = 0; i < irqmap->num_vectors; i++) {
		map = &irqmap->vecmap[i];

		vector_id = map->vector_id;
		/* validate msg params */
		if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
			i40e_pf_config_irq_link_list(vf, map);
		} else {
			/* configured queue size exceeds the limit */
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    ret, NULL, 0);

	return ret;
}
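/*
 * Enable or disable the VF queues selected in 'qsel'. RX queues are
 * always switched on first and off last.
 */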
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct virtchnl_queue_select *qsel,
			   bool on)
{
	int ret = I40E_SUCCESS;
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t baseq = vf->vsi->base_queue;

	if (qsel->rx_queues + qsel->tx_queues == 0)
		return I40E_ERR_PARAM;

	/* always enable RX first and disable last */
	/* Enable RX if it's to be enabled */
	if (on)
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}

	/* Enable/Disable TX */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
		if (qsel->tx_queues & (1 << i)) {
			ret = i40e_switch_tx_queue(hw, baseq + i, on);
			if (ret != I40E_SUCCESS)
				return ret;
		}

	/* Disable RX last if it's to be disabled */
	if (!on)
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}

	return ret;
}
static int
i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
				       uint8_t *msg,
				       uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_queue_select *q_sel =
		(struct virtchnl_queue_select *)msg;

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, true);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_queue_select *q_sel =
		(struct virtchnl_queue_select *)msg;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DISABLE_QUEUES,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, false);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list =
		(struct virtchnl_ether_addr_list *)msg;
	struct i40e_mac_filter_info filter;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ADD_ETH_ADDR,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	memset(&filter, 0, sizeof(struct i40e_mac_filter_info));

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "add_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		if (is_zero_ether_addr(mac) ||
		    i40e_vsi_add_mac(vf->vsi, &filter)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list =
		(struct virtchnl_ether_addr_list *)msg;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DEL_ETH_ADDR,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		if (is_zero_ether_addr(mac) ||
		    i40e_vsi_delete_mac(vf->vsi, mac)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg, uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_vlan_filter_list *vlan_filter_list =
		(struct virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ADD_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "add_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;

	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg,
				  uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_vlan_filter_list *vlan_filter_list =
		(struct virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DEL_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "delete_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;
	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_config_promisc_mode(
					struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_promisc_info *promisc =
		(struct virtchnl_promisc_info *)msg;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	bool unicast = FALSE, multicast = FALSE;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*promisc)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
		unicast = TRUE;
	ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
			vf->vsi->seid, unicast, NULL, true);
	if (ret != I40E_SUCCESS)
		goto send_msg;

	if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
		multicast = TRUE;
	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
						    multicast, NULL);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf,
		VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
	i40e_update_vsi_stats(vf->vsi);

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
					    I40E_SUCCESS,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));
	else
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));

	return I40E_SUCCESS;
}
static int
i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");

	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");

	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				    ret, NULL, 0);

	return ret;
}
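/*
 * Push a VIRTCHNL_EVENT_LINK_CHANGE event carrying the current link
 * status and speed to the VF, provided its admin queue is set up.
 */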
static void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct virtchnl_pf_event event;
	uint16_t vf_id = vf->vf_idx;
	uint32_t tval, rval;

	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.event_data.link_event.link_status =
		dev->data->dev_link.link_status;

	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
	switch (dev->data->dev_link.link_speed) {
	case ETH_SPEED_NUM_100M:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
		break;
	case ETH_SPEED_NUM_1G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
		break;
	case ETH_SPEED_NUM_10G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
		break;
	case ETH_SPEED_NUM_20G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
		break;
	case ETH_SPEED_NUM_25G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
		break;
	case ETH_SPEED_NUM_40G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	default:
		event.event_data.link_event.link_speed =
			VIRTCHNL_LINK_SPEED_UNKNOWN;
		break;
	}

	tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
	rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));

	/* Only send the event when the VF admin queue is usable */
	if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
	    tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
	    rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
	    rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
			I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
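/*
 * Main dispatcher for messages arriving from VFs: validates the sender,
 * lets the application veto the request through the mailbox event
 * callback, then routes the opcode to the matching handler above.
 */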
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
			   uint16_t abs_vf_id, uint32_t opcode,
			   __rte_unused uint32_t retval,
			   uint8_t *msg,
			   uint16_t msglen)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf_vf *vf;
	/* AdminQ will pass absolute VF id, transfer to internal vf id */
	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
	struct rte_pmd_i40e_mb_event_param ret_param;
	bool b_op = true;

	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "invalid argument");
		return;
	}

	vf = &pf->vfs[vf_id];
	if (!vf->vsi) {
		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
		i40e_pf_host_send_msg_to_vf(vf, opcode,
					    I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
		return;
	}
	/*
	 * Initialise the structure sent to the user application; the
	 * application returns its response in the retval field.
	 */
	ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
	ret_param.vfid = vf_id;
	ret_param.msg_type = opcode;
	ret_param.msg = (void *)msg;
	ret_param.msglen = msglen;

	/*
	 * Ask the user application whether we are allowed to perform the
	 * requested function. If we get ret_param.retval ==
	 * RTE_PMD_I40E_MB_EVENT_PROCEED, it is business as usual. If we get
	 * RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
	 * do nothing and send NOT_SUPPORTED to the VF, since the PF must
	 * send a response and ACK/NACK replies are not defined.
	 */
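	/*
	 * Illustrative sketch (not part of the driver): an application can
	 * intercept VF requests by registering a callback for
	 * RTE_ETH_EVENT_VF_MBOX, e.g.:
	 *
	 *     static int
	 *     vf_msg_cb(uint16_t port_id, enum rte_eth_event_type type,
	 *               void *cb_arg, void *ret_param)
	 *     {
	 *         struct rte_pmd_i40e_mb_event_param *p = ret_param;
	 *
	 *         p->retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
	 *         return 0;
	 *     }
	 *
	 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
	 *                                   vf_msg_cb, NULL);
	 *
	 * The callback signature shown follows current rte_ethdev; check the
	 * DPDK release in use.
	 */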
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
				      &ret_param);
	if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
		PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
			    opcode);
		b_op = false;
	}
	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		PMD_DRV_LOG(INFO, "OP_VERSION received");
		i40e_pf_host_process_cmd_version(vf, b_op);
		break;
	case VIRTCHNL_OP_RESET_VF:
		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
		i40e_pf_host_process_cmd_reset_vf(vf);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
		i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
		i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
		if (b_op) {
			i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
			i40e_notify_vf_link_status(dev, vf);
		} else {
			i40e_pf_host_send_msg_to_vf(
				vf, VIRTCHNL_OP_ENABLE_QUEUES,
				I40E_NOT_SUPPORTED, NULL, 0);
		}
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
		i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_add_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_del_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
							     msglen, b_op);
		break;
	case VIRTCHNL_OP_GET_STATS:
		PMD_DRV_LOG(INFO, "OP_GET_STATS received");
		i40e_pf_host_process_cmd_get_stats(vf, b_op);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
		i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
		i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
		break;
	/*
	 * Don't add supported commands below this line: anything not
	 * handled above falls through and returns an error code.
	 */
	default:
		PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
		i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
					    NULL, 0);
		break;
	}
}
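/*
 * Called at PF initialisation: allocates per-VF state and performs an
 * initial (non-HW-triggered) reset of every VF.
 */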
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Return if SRIOV is not enabled, no VF is configured or
	 * no queue is assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store VF structure */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
	}

	RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	i40e_pf_enable_irq0(hw);

	return ret;
}
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Return if SRIOV is not enabled, no VF is configured or
	 * no queue is assigned.
	 */
	if ((!hw->func_caps.sr_iov_1_1) ||
	    (pf->vf_num == 0) ||
	    (pf->vf_nb_qps == 0))
		return I40E_SUCCESS;

	/* free the memory storing the VF structures */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}