/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "rte_pmd_i40e.h"

#define I40E_CFG_CRCSTRIP_DEFAULT 1
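/*
 * CRC-strip setting applied on the plain CONFIG_VSI_QUEUES path, where the
 * VF message carries no per-queue CRC-strip flag (the _EXT opcode supplies
 * its own value per queue pair).
 */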
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct i40e_virtchnl_queue_select *qsel,
			   bool on);

/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase = vf->vsi->base_queue;
	uint16_t q1, q2;
	uint32_t val;

	/*
	 * The VF uses scattered-range queues, so QBASE need not be set
	 * in this register.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable the VPLAN_QTABLE[] registers as valid */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		       I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* map PF queues to VF */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/* map PF queues to VSI */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;

		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
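/*
 * Each VSILAN_QTABLE register packs two queue indexes (QINDEX_0 and
 * QINDEX_1), which is why the mapping loop above steps over queue pairs;
 * slots beyond nb_qps are filled with the QINDEX_0 mask to mark them unused.
 */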
/**
 * Proceed with the VF reset operation.
 **/
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct i40e_virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	pf = vf->pf;
	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify VF that we are in VFR progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be generated
	 * and this function will be called again. To avoid that, disable
	 * the interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout");
		return -ETIMEDOUT;
	}

	/* If this is not the first reset, do the cleanup job first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							   (vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
	/* Check the pending transactions of this VF */
	/* Use absolute VF id, refer to datasheet for details */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		       (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
		return -EFAULT;
	}

	/* Reset done, set COMPLETE flag and clear reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);

	I40E_WRITE_FLUSH(hw);

	/* Allocate resource again */
	if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 NULL, vf->vf_idx);
	} else {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 vf->pf->main_vsi, vf->vf_idx);
	}

	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);

	return ret;
}
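/*
 * The reset above is a three-step handshake visible to the VF through
 * VFGEN_RSTAT1: INPROGRESS while the PF tears down and rebuilds the VSI,
 * COMPLETED once the hardware reports the reset done and pending PCI
 * transactions have drained, then VFACTIVE when the VF may issue requests
 * again.
 */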
static int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
			    uint32_t opcode,
			    uint32_t retval,
			    uint8_t *msg,
			    uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
	int ret;

	ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
				     msg, msglen, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
			     hw->aq.asq_last_status);
	}

	return ret;
}
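/*
 * Every virtchnl reply in this file funnels through the helper above; it
 * translates the internal VF index into the absolute VF id expected by the
 * AdminQ firmware interface.
 */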
static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
	struct i40e_virtchnl_version_info info;

	/* Respond like a Linux PF host in order to support both DPDK VF and
	 * Linux VF drivers. The cost is that DPDK-host-specific features,
	 * such as CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT, will not be
	 * available.
	 *
	 * The DPDK VF also cannot identify the host driver by the returned
	 * version number; it always assumes it is talking to a Linux PF.
	 */
	info.major = I40E_VIRTCHNL_VERSION_MAJOR;
	info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
					    I40E_SUCCESS,
					    (uint8_t *)&info,
					    sizeof(info));
	else
		i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&info,
					    sizeof(info));
}
static int
i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
{
	i40e_pf_host_vf_reset(vf, 1);

	/* No feedback will be sent to the VF for a VFLR */
	return I40E_SUCCESS;
}
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
	struct i40e_virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* only have 1 VSI by default */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	      I40E_DEFAULT_VF_VSI_NUM *
	      sizeof(struct i40e_virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem");
		ret = I40E_ERR_NO_MEMORY;
		vf_res = NULL;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
				   I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change the settings below if the PF host can support more VSIs
	 * for the VF.
	 */
	vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
	/* The VF is assumed to have a single VSI now, so always return 0 */
	vf_res->vsi_res[0].vsi_id = 0;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
	ether_addr_copy(&vf->mac_addr,
		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				    ret, (uint8_t *)vf_res, len);
	rte_free(vf_res);

	return ret;
}
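/*
 * The reply buffer above ends in a flexible array of vsi_res entries sized
 * for I40E_DEFAULT_VF_VSI_NUM elements; num_vsis and the allocation length
 * must be kept in sync if multi-VSI support is ever added.
 */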
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_rxq_info *rxq,
			    uint8_t crcstrip)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_rxq rx_ctx;
	uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

	/* Clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif

	if (rxq->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
		rx_ctx.dtype = i40e_header_split_enabled;
	} else {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
		rx_ctx.dtype = i40e_header_split_none;
	}
	rx_ctx.rxmax = rxq->max_pkt_size;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = crcstrip;
	rx_ctx.l2tsel = 1;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;
	err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

	return err;
}
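/*
 * Note that rx_ctx.base is expressed in I40E_QUEUE_BASE_ADDR_UNIT granules
 * (128 bytes), and the VF's local queue id is translated into the PF's
 * absolute queue range through vsi->base_queue.
 */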
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.new_context = 1;
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/* Bind the queue with the VF function. TX and RX queues appear
	 * in pairs, so only QTX_CTL needs to be set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
		  ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		   I40E_QTX_CTL_PF_INDX_MASK) |
		  (((vf->vf_idx + hw->func_caps.vf_base_id) <<
		    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
		   I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return err;
}
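/*
 * QTX_CTL packs three pieces of ownership information: the queue class
 * (VF queue), the owning PF index, and the absolute VF number; as the field
 * names suggest, this is how the hardware attributes the queue to a
 * function.
 */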
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *vc_qpi;
	int i, ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
	    vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
	    msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
					vc_vqci->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpi = vc_vqci->qpair;
	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/*
		 * Apply the VF RX queue setting to HMC.
		 * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information in
		 * 'struct i40e_virtchnl_queue_pair_extra_info' is needed;
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
			I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply the VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf,
			&vc_qpi[i].txq) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
					       uint8_t *msg,
					       uint16_t msglen,
					       bool b_op)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
		(struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
	struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
	int i, ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
	    vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
	    msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
					vc_vqcei->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpei = vc_vqcei->qpair;
	for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
		if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
		/*
		 * Apply the VF RX queue setting to HMC.
		 * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information in
		 * 'struct i40e_virtchnl_queue_pair_ext_info' is needed;
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
			vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply the VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
		    I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
				    ret, NULL, 0);

	return ret;
}
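/*
 * The _EXT handler above differs from the plain CONFIG_VSI_QUEUES path in
 * one respect only: the CRC-strip flag comes from each pair's rxq_ext field
 * instead of the compile-time I40E_CFG_CRCSTRIP_DEFAULT.
 */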
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
					uint8_t *msg, uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_irq_map_info *irqmap =
		(struct i40e_virtchnl_irq_map_info *)msg;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
		PMD_DRV_LOG(ERR, "buffer too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/* Assume the VF has only 1 vector to bind all queues */
	if (irqmap->num_vectors != 1) {
		PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/* This MSIX interrupt is stored in VF-range numbering */
	vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
	vf->vsi->nb_msix = irqmap->num_vectors;
	vf->vsi->nb_used_qps = vf->vsi->nb_qps;

	/* Don't care how the TX/RX queues map to this vector;
	 * link all VF RX queues together and only do the mapping work here.
	 * The VF can disable/enable the interrupt by itself.
	 */
	i40e_vsi_queues_bind_intr(vf->vsi);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct i40e_virtchnl_queue_select *qsel,
			   bool on)
{
	int ret = I40E_SUCCESS;
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t baseq = vf->vsi->base_queue;

	if (qsel->rx_queues + qsel->tx_queues == 0)
		return I40E_ERR_PARAM;

	/* always enable RX first and disable it last */
	/* Enable RX when enabling */
	if (on) {
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	/* Enable/Disable TX */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
		if (qsel->tx_queues & (1 << i)) {
			ret = i40e_switch_tx_queue(hw, baseq + i, on);
			if (ret != I40E_SUCCESS)
				return ret;
		}

	/* Disable RX last when disabling */
	if (!on) {
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	return ret;
}
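/*
 * The RX-first-on, RX-last-off ordering above presumably keeps the RX side
 * valid for the whole time TX is active; the qsel bitmaps are indexed by
 * VF-local queue number and translated through baseq.
 */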
static int
i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
				       uint8_t *msg,
				       uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_queue_select *q_sel =
		(struct i40e_virtchnl_queue_select *)msg;

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, true);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_queue_select *q_sel =
		(struct i40e_virtchnl_queue_select *)msg;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_DISABLE_QUEUES,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, false);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_ether_addr_list *addr_list =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_mac_filter_info filter;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	memset(&filter, 0, sizeof(struct i40e_mac_filter_info));

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "add_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		(void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		if (is_zero_ether_addr(mac) ||
		    i40e_vsi_add_mac(vf->vsi, &filter)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_ether_addr_list *addr_list =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		if (!is_valid_assigned_ether_addr(mac) ||
		    i40e_vsi_delete_mac(vf->vsi, mac)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg, uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_ADD_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "add_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;

	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg,
				  uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_DEL_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "delete_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;
	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_config_promisc_mode(
					struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_promisc_info *promisc =
		(struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	bool unicast = FALSE, multicast = FALSE;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*promisc)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		unicast = TRUE;
	ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
			vf->vsi->seid, unicast, NULL, true);
	if (ret != I40E_SUCCESS)
		goto send_msg;

	if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		multicast = TRUE;
	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
						    multicast, NULL);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf,
		I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
	i40e_update_vsi_stats(vf->vsi);

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
					    I40E_SUCCESS,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));
	else
		i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));

	return I40E_SUCCESS;
}
static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
					struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_offload_info *offload =
		(struct i40e_virtchnl_vlan_offload_info *)msg;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*offload)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi,
					     !!offload->enable_vlan_strip);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
				  uint8_t *msg,
				  uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_pvid_info *tpid_info =
		(struct i40e_virtchnl_pvid_info *)msg;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*tpid_info)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
				    ret, NULL, 0);

	return ret;
}
void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
	struct i40e_virtchnl_pf_event event;

	event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	event.event_data.link_event.link_status =
		dev->data->dev_link.link_status;
	event.event_data.link_event.link_speed =
		(enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
				    I40E_SUCCESS, (uint8_t *)&event,
				    sizeof(event));
}
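/*
 * This unsolicited OP_EVENT message is also sent right after a successful
 * OP_ENABLE_QUEUES (see the message handler below), so a freshly started VF
 * immediately learns the current link state.
 */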
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
			   uint16_t abs_vf_id, uint32_t opcode,
			   __rte_unused uint32_t retval,
			   uint8_t *msg,
			   uint16_t msglen)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf_vf *vf;
	/* AdminQ passes the absolute VF id; convert it to the internal id */
	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
	struct rte_pmd_i40e_mb_event_param cb_param;
	bool b_op = TRUE;

	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "invalid argument");
		return;
	}

	vf = &pf->vfs[vf_id];
	if (!vf->vsi) {
		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
		i40e_pf_host_send_msg_to_vf(vf, opcode,
					    I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
		return;
	}

	/**
	 * initialise the structure sent to the user application;
	 * the user's response comes back in the retval field
	 */
	cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
	cb_param.vfid = vf_id;
	cb_param.msg_type = opcode;
	cb_param.msg = (void *)msg;
	cb_param.msglen = msglen;

	/**
	 * Ask the user application if we are allowed to perform those
	 * functions. If cb_param.retval is RTE_PMD_I40E_MB_EVENT_PROCEED,
	 * then it is business as usual. On RTE_PMD_I40E_MB_EVENT_NOOP_ACK or
	 * RTE_PMD_I40E_MB_EVENT_NOOP_NACK, do nothing and send not_supported
	 * to the VF: the PF must send a response and plain ACK/NACK is not
	 * defined.
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
	if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
		PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
			    opcode);
		b_op = FALSE;
	}

	switch (opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		PMD_DRV_LOG(INFO, "OP_VERSION received");
		i40e_pf_host_process_cmd_version(vf, b_op);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
		i40e_pf_host_process_cmd_reset_vf(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
		i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
		i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
							   msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
		i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
							       msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
		if (b_op) {
			i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
			i40e_notify_vf_link_status(dev, vf);
		} else {
			i40e_pf_host_send_msg_to_vf(
				vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				I40E_NOT_SUPPORTED, NULL, 0);
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUES received");
		i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_add_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_del_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
							     msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		PMD_DRV_LOG(INFO, "OP_GET_STATS received");
		i40e_pf_host_process_cmd_get_stats(vf, b_op);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
		i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
							  msglen, b_op);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
		i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
		break;
	/* Commands not handled above are not supported and will
	 * return an error code.
	 */
	default:
		PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
		i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
					    NULL, 0);
		break;
	}
}
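/*
 * Illustrative sketch (not part of this driver): an application can veto VF
 * requests by registering for the mailbox event handled above, e.g.
 *
 *	static void
 *	vf_mbox_cb(uint8_t port __rte_unused,
 *		   enum rte_eth_event_type type __rte_unused, void *param)
 *	{
 *		struct rte_pmd_i40e_mb_event_param *p = param;
 *
 *		// refuse everything except version/resource negotiation
 *		if (p->msg_type != I40E_VIRTCHNL_OP_VERSION &&
 *		    p->msg_type != I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
 *			p->retval = RTE_PMD_I40E_MB_EVENT_NOOP_NACK;
 *	}
 *
 * registered via rte_eth_dev_callback_register(port, RTE_ETH_EVENT_VF_MBOX,
 * vf_mbox_cb, NULL); the exact callback signature depends on the DPDK
 * release in use.
 */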
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * return if SRIOV is not enabled, no VF number is configured or
	 * no queue is assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store the VF structures */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
		eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
	}

	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	i40e_pf_enable_irq0(hw);

	return ret;
}
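/*
 * Each VF starts with the random MAC address assigned above; it is reported
 * back through GET_VF_RESOURCES as the default address, and the VF then
 * programs its own filters via ADD_ETHER_ADDRESS.
 */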
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * return if SRIOV is not enabled, no VF number is configured or
	 * no queue is assigned.
	 */
	if ((!hw->func_caps.sr_iov_1_1) ||
	    (pf->vf_num == 0) ||
	    (pf->vf_nb_qps == 0))
		return I40E_SUCCESS;

	/* free the memory storing the VF structures */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}