/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "rte_pmd_i40e.h"

#define I40E_CFG_CRCSTRIP_DEFAULT 1
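/*
 * Overview note: this file implements the PF (host) side of the virtchnl
 * mailbox. It resets VFs, programs queue and interrupt mappings on their
 * behalf, and answers the VIRTCHNL_OP_* requests dispatched from the
 * admin queue.
 */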
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct virtchnl_queue_select *qsel,
			   bool on);

/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase = vf->vsi->base_queue;
	uint16_t q1, q2;
	uint32_t val;

	/*
	 * The VF uses scattered-range queues, so there is no need
	 * to set QBASE in this register.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Mark the VPLAN_QTABLE[] registers as valid */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		       I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Map PF queues to VF */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/* Map PF queues to VSI; each register packs two queue indexes */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;

		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
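/*
 * Worked example for the VSILAN_QTABLE packing above (hypothetical
 * values): with qbase = 16 and nb_qps = 4, VSILAN_QTABLE[0] holds queues
 * 16 and 17, VSILAN_QTABLE[1] holds queues 18 and 19, and the remaining
 * entries are written with the QINDEX_0 mask to mark them unused.
 */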
/**
 * Perform the VF reset operation.
 */
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	pf = vf->pf;
	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify the VF that a VF reset (VFR) is in progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be generated
	 * and this function will be called again. To avoid that, disable
	 * the interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until the VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout");
		return -ETIMEDOUT;
	}
	/* If this is not the first reset, do the cleanup job first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							(vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val,
				       I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* Remove the VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR 0xAA
#define I40E_VF_PEND_MASK 0x20
	/* Check the pending transactions of this VF.
	 * Use the absolute VF id; refer to the datasheet for details.
	 */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		       (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
		return -ETIMEDOUT;
	}

	/* Reset done; set the COMPLETED flag and clear the reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	I40E_WRITE_FLUSH(hw);

	/* Allocate resources again */
	if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 NULL, vf->vf_idx);
	} else {
		vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
					 vf->pf->main_vsi, vf->vf_idx);
	}

	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	/* Reset done; set the VFACTIVE flag */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);

	return ret;
}
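/*
 * Relay a virtchnl response to a VF over the admin queue. The opcode and
 * retval echo the request being answered; msg/msglen carry an optional
 * payload and may be NULL/0.
 */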
int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
			    uint32_t opcode,
			    uint32_t retval,
			    uint8_t *msg,
			    uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
	int ret;

	ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
				     msg, msglen, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
			     hw->aq.asq_last_status);
	}

	return ret;
}
static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_version_info info;

	/* Respond like a Linux PF host in order to support both DPDK VF and
	 * Linux VF drivers. The cost is that DPDK-host-specific features,
	 * such as CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT, will not be
	 * available.
	 *
	 * The DPDK VF also cannot identify the host driver by the version
	 * number returned; it always assumes it is talking to a Linux PF.
	 */
	info.major = VIRTCHNL_VERSION_MAJOR;
	info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
					    I40E_SUCCESS,
					    (uint8_t *)&info,
					    sizeof(info));
	else
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&info,
					    sizeof(info));
}
static int
i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
{
	i40e_pf_host_vf_reset(vf, 1);

	/* No feedback will be sent to the VF for VFLR */
	return I40E_SUCCESS;
}
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    VIRTCHNL_OP_GET_VF_RESOURCES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* Only one VSI by default */
	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_DEFAULT_VF_VSI_NUM *
	      sizeof(struct virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem");
		ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
				   VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change the settings below if the PF host can support more VSIs
	 * for the VF.
	 */
	vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
	ether_addr_copy(&vf->mac_addr,
		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				    ret, (uint8_t *)vf_res, len);
	rte_free(vf_res);

	return ret;
}
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct virtchnl_rxq_info *rxq,
			    uint8_t crcstrip)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_rxq rx_ctx;
	uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

	/* Clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif

	if (rxq->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
		rx_ctx.dtype = i40e_header_split_enabled;
	} else {
		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
		rx_ctx.dtype = i40e_header_split_none;
	}
	rx_ctx.rxmax = rxq->max_pkt_size;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = crcstrip;
	rx_ctx.l2tsel = 1;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;
	err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

	return err;
}
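/*
 * Look up which traffic class a queue belongs to by decoding the VSI's
 * tc_mapping words: each enabled TC covers 2^bsf queues starting at
 * qp_idx. Falls back to TC 0 when no mapping matches.
 */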
static inline uint8_t
i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
			 uint16_t queue_id)
{
	struct i40e_aqc_vsi_properties_data *info = &vsi->info;
	uint16_t bsf, qp_idx;
	uint8_t i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & (1 << i)) {
			qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
				I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
			bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
				I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
			if (queue_id >= qp_idx &&
			    queue_id < qp_idx + (1 << bsf))
				return i;
		}
	}

	return 0;
}
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi = vf->vsi;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
	uint8_t dcb_tc;

	/* Clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
	tx_ctx.head_wb_ena = txq->headwb_enabled;
	tx_ctx.head_wb_addr = txq->dma_headwb_addr;

	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/* Bind the queue with the VF function. TX and RX queues appear
	 * in pairs, so only QTX_CTL needs to be set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
		  ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		   I40E_QTX_CTL_PF_INDX_MASK) |
		  (((vf->vf_idx + hw->func_caps.vf_base_id) <<
		    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
		   I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
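/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the VF's queue-pair
 * configuration message, then program the Rx and Tx HMC contexts for
 * each requested queue pair before replying with the result.
 */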
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct virtchnl_vsi_queue_config_info *vc_vqci =
		(struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *vc_qpi;
	int i, ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    VIRTCHNL_OP_CONFIG_VSI_QUEUES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
	    vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
	    msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
					vc_vqci->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpi = vc_vqci->qpair;
	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/*
		 * Apply the VF RX queue setting to HMC.
		 * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information in
		 * 'struct virtchnl_queue_pair_extra_info' is needed;
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
			I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply the VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf,
			&vc_qpi[i].txq) != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				    ret, NULL, 0);

	return ret;
}
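/*
 * Program the interrupt cause link list for one VF vector. The hardware
 * chains causes: VPINT_LNKLST0/LNKLSTN points at the first queue, and
 * each QINT_RQCTL/QINT_TQCTL register names the next queue in the chain
 * until a NEXTQ index of 0x7FF terminates the list.
 */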
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
			     struct virtchnl_vector_map *vvm)
{
#define BITS_PER_CHAR 8
	uint64_t linklistmap = 0, tempmap;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t qid;
	bool b_first_q = true;
	enum i40e_queue_type qtype;
	uint16_t vector_id;
	uint32_t reg, reg_idx;
	uint16_t itr_idx = 0, i;

	vector_id = vvm->vector_id;
	/* Set up the head of the link list */
	if (!vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
			+ (vector_id - 1));

	if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
		I40E_WRITE_REG(hw, reg_idx,
			       I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto cfg_irq_done;
	}

	/* Sort all rx and tx queues: even bits of linklistmap carry RX
	 * queues, odd bits carry TX queues.
	 */
	tempmap = vvm->rxq_map;
	for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
		if (tempmap & 0x1)
			linklistmap |= (1ULL << (2 * i));
		tempmap >>= 1;
	}

	tempmap = vvm->txq_map;
	for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
		if (tempmap & 0x1)
			linklistmap |= (1ULL << (2 * i + 1));
		tempmap >>= 1;
	}

	/* Link all rx and tx queues into a chained list */
	tempmap = linklistmap;
	for (i = 0; i < sizeof(linklistmap) * BITS_PER_CHAR; i++) {
		if (tempmap & 0x1) {
			qtype = (enum i40e_queue_type)(i % 2);
			qid = vf->vsi->base_queue + i / 2;
			if (b_first_q) {
				/* This is the list header */
				b_first_q = false;
				reg = ((qtype <<
					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
					| qid);
			} else {
				/* Element in the link list */
				reg = (vector_id) |
				  (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
				  (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
				  BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
				  (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
			}
			I40E_WRITE_REG(hw, reg_idx, reg);
			/* Find the next register to program */
			switch (qtype) {
			case I40E_QUEUE_TYPE_RX:
				reg_idx = I40E_QINT_RQCTL(qid);
				itr_idx = vvm->rxitr_idx;
				break;
			case I40E_QUEUE_TYPE_TX:
				reg_idx = I40E_QINT_TQCTL(qid);
				itr_idx = vvm->txitr_idx;
				break;
			default:
				break;
			}
		}
		tempmap >>= 1;
	}

	/* Terminate the link list */
	reg = (vector_id) |
	      (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	      (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
	      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
	I40E_WRITE_REG(hw, reg_idx, reg);

cfg_irq_done:
	I40E_WRITE_FLUSH(hw);
}
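/*
 * Handle VIRTCHNL_OP_CONFIG_IRQ_MAP. A request for a single vector is
 * treated as coming from a DPDK VF and all of its queues are bound to
 * that vector; multiple vectors indicate a Linux VF, and each vector map
 * is validated and programmed individually.
 */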
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
					uint8_t *msg, uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct virtchnl_irq_map_info *irqmap =
		(struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	int i;
	uint16_t vector_id, itr_idx;
	unsigned long qbit_max;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_CONFIG_IRQ_MAP,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
		PMD_DRV_LOG(ERR, "buffer too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	/* The PF host supports both DPDK and Linux VF drivers; identify
	 * which by the number of vectors requested.
	 */

	/* A DPDK VF only requires a single vector */
	if (irqmap->num_vectors == 1) {
		/* This MSIX interrupt is stored in the VF's vector range */
		vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
		vf->vsi->nb_msix = irqmap->num_vectors;
		vf->vsi->nb_used_qps = vf->vsi->nb_qps;
		itr_idx = irqmap->vecmap[0].rxitr_idx;

		/* We don't care how the TX/RX queues map to this vector;
		 * link all VF RX queues together and only do the mapping
		 * work. The VF can enable/disable the interrupt by itself.
		 */
		i40e_vsi_queues_bind_intr(vf->vsi, itr_idx);
		goto send_msg;
	}

	/* Otherwise, it's a Linux VF driver */
	qbit_max = 1 << pf->vf_nb_qp_max;
	for (i = 0; i < irqmap->num_vectors; i++) {
		map = &irqmap->vecmap[i];

		vector_id = map->vector_id;
		/* Validate msg params */
		if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
			i40e_pf_config_irq_link_list(vf, map);
		} else {
			/* Configured queue size exceeds the limit */
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
			   struct virtchnl_queue_select *qsel,
			   bool on)
{
	int ret = I40E_SUCCESS;
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t baseq = vf->vsi->base_queue;

	if (qsel->rx_queues + qsel->tx_queues == 0)
		return I40E_ERR_PARAM;

	/* Always enable RX first and disable it last */
	/* Enable RX if enabling */
	if (on) {
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	/* Enable/Disable TX */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
		if (qsel->tx_queues & (1 << i)) {
			ret = i40e_switch_tx_queue(hw, baseq + i, on);
			if (ret != I40E_SUCCESS)
				return ret;
		}

	/* Disable RX last if disabling */
	if (!on) {
		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
			if (qsel->rx_queues & (1 << i)) {
				ret = i40e_switch_rx_queue(hw, baseq + i, on);
				if (ret != I40E_SUCCESS)
					return ret;
			}
	}

	return ret;
}
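/*
 * Handle VIRTCHNL_OP_ENABLE_QUEUES: turn on the Rx/Tx queues selected in
 * the VF's queue_select bitmaps and report the result back to the VF.
 */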
static int
i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
				       uint8_t *msg,
				       uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_queue_select *q_sel =
		(struct virtchnl_queue_select *)msg;

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, true);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_queue_select *q_sel =
		(struct virtchnl_queue_select *)msg;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DISABLE_QUEUES,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*q_sel)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}
	ret = i40e_pf_host_switch_queues(vf, q_sel, false);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list =
		(struct virtchnl_ether_addr_list *)msg;
	struct i40e_mac_filter_info filter;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ADD_ETH_ADDR,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	memset(&filter, 0, sizeof(struct i40e_mac_filter_info));

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "add_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		if (is_zero_ether_addr(mac) ||
		    i40e_vsi_add_mac(vf->vsi, &filter)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen,
					   bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list =
		(struct virtchnl_ether_addr_list *)msg;
	int i;
	struct ether_addr *mac;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DEL_ETH_ADDR,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		if (is_zero_ether_addr(mac) ||
		    i40e_vsi_delete_mac(vf->vsi, mac)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg, uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_vlan_filter_list *vlan_filter_list =
		(struct virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ADD_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "add_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;

	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg,
				  uint16_t msglen,
				  bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_vlan_filter_list *vlan_filter_list =
		(struct virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DEL_VLAN,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "delete_vlan argument too short");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;
	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_config_promisc_mode(
					struct i40e_pf_vf *vf,
					uint8_t *msg,
					uint16_t msglen,
					bool b_op)
{
	int ret = I40E_SUCCESS;
	struct virtchnl_promisc_info *promisc =
		(struct virtchnl_promisc_info *)msg;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	bool unicast = FALSE, multicast = FALSE;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	if (msg == NULL || msglen != sizeof(*promisc)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
		unicast = TRUE;
	ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
			vf->vsi->seid, unicast, NULL, true);
	if (ret != I40E_SUCCESS)
		goto send_msg;

	if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
		multicast = TRUE;
	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
						    multicast, NULL);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf,
		VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);

	return ret;
}
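/*
 * Handle VIRTCHNL_OP_GET_STATS: refresh the VSI counters and return the
 * eth_stats block to the VF.
 */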
static int
i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
	i40e_update_vsi_stats(vf->vsi);

	if (b_op)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
					    I40E_SUCCESS,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));
	else
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
					    I40E_NOT_SUPPORTED,
					    (uint8_t *)&vf->vsi->eth_stats,
					    sizeof(vf->vsi->eth_stats));

	return I40E_SUCCESS;
}
static int
i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");

	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(
			vf,
			VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
			I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");

	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				    ret, NULL, 0);

	return ret;
}
static void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct virtchnl_pf_event event;
	uint16_t vf_id = vf->vf_idx;
	uint32_t tval, rval;

	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.event_data.link_event.link_status =
		dev->data->dev_link.link_status;

	/* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
	switch (dev->data->dev_link.link_speed) {
	case ETH_SPEED_NUM_100M:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
		break;
	case ETH_SPEED_NUM_1G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
		break;
	case ETH_SPEED_NUM_10G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
		break;
	case ETH_SPEED_NUM_20G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
		break;
	case ETH_SPEED_NUM_25G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
		break;
	case ETH_SPEED_NUM_40G:
		event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	default:
		event.event_data.link_event.link_speed =
			VIRTCHNL_LINK_SPEED_UNKNOWN;
		break;
	}

	tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
	rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));

	/* Only notify the VF if its admin queue is configured */
	if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
	    tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
	    rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
	    rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
			I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
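/*
 * Central dispatcher for VF mailbox messages: validates the sending VF,
 * lets the user application veto the request via the RTE_ETH_EVENT_VF_MBOX
 * callback, then routes the opcode to the matching handler above.
 */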
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
			   uint16_t abs_vf_id, uint32_t opcode,
			   __rte_unused uint32_t retval,
			   uint8_t *msg,
			   uint16_t msglen)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf_vf *vf;
	/* AdminQ passes the absolute VF id; translate to the internal VF id */
	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
	struct rte_pmd_i40e_mb_event_param ret_param;
	bool b_op = TRUE;

	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "invalid argument");
		return;
	}

	vf = &pf->vfs[vf_id];
	if (!vf->vsi) {
		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
		i40e_pf_host_send_msg_to_vf(vf, opcode,
					    I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
		return;
	}

	/**
	 * Initialise the structure sent to the user application;
	 * the user's response is returned in the retval field.
	 */
	ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
	ret_param.vfid = vf_id;
	ret_param.msg_type = opcode;
	ret_param.msg = (void *)msg;
	ret_param.msglen = msglen;

	/**
	 * Ask the user application if we're allowed to perform those
	 * functions. If we get ret_param.retval ==
	 * RTE_PMD_I40E_MB_EVENT_PROCEED, then it's business as usual.
	 * On RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
	 * do nothing and send not_supported to the VF, since the PF must
	 * send a response to the VF and ACK/NACK is not defined.
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
				      &ret_param);
	if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
		PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
			    opcode);
		b_op = FALSE;
	}

	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		PMD_DRV_LOG(INFO, "OP_VERSION received");
		i40e_pf_host_process_cmd_version(vf, b_op);
		break;
	case VIRTCHNL_OP_RESET_VF:
		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
		i40e_pf_host_process_cmd_reset_vf(vf);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
		i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
		i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
		if (b_op) {
			i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
			i40e_notify_vf_link_status(dev, vf);
		} else {
			i40e_pf_host_send_msg_to_vf(
				vf, VIRTCHNL_OP_ENABLE_QUEUES,
				I40E_NOT_SUPPORTED, NULL, 0);
		}
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
		i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_add_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
		i40e_pf_host_process_cmd_del_ether_address(vf, msg,
							   msglen, b_op);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
							     msglen, b_op);
		break;
	case VIRTCHNL_OP_GET_STATS:
		PMD_DRV_LOG(INFO, "OP_GET_STATS received");
		i40e_pf_host_process_cmd_get_stats(vf, b_op);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
		i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
		i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
		break;
	/* Don't add supported commands below; anything unhandled here
	 * returns an error code.
	 */
	default:
		PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
		i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
					    NULL, 0);
		break;
	}
}
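/*
 * PF-side SR-IOV initialization: allocate the per-VF state array, mask VF
 * link-status interrupts, and reset every VF once so that each gets its
 * VSI and queue mapping set up.
 */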
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * Return if SR-IOV is not enabled, the VF number is not configured,
	 * or no queues are assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store the VF structures */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR events */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
	}

	RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
	/* Restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	i40e_pf_enable_irq0(hw);

	return ret;
}
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * Return if SR-IOV is not enabled, the VF number is not configured,
	 * or no queues are assigned.
	 */
	if ((!hw->func_caps.sr_iov_1_1) ||
	    (pf->vf_num == 0) ||
	    (pf->vf_nb_qps == 0))
		return I40E_SUCCESS;

	/* Free the memory storing the VF structures */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR events */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}