net/i40e: fix VF reset flow
[dpdk.git] / drivers / net / i40e / i40e_pf.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "rte_pmd_i40e.h"

#define I40E_CFG_CRCSTRIP_DEFAULT 1

static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
                           struct i40e_virtchnl_queue_select *qsel,
                           bool on);
/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
        int i;
        struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        uint16_t vsi_id = vf->vsi->vsi_id;
        uint16_t vf_id  = vf->vf_idx;
        uint16_t nb_qps = vf->vsi->nb_qps;
        uint16_t qbase  = vf->vsi->base_queue;
        uint16_t q1, q2;
        uint32_t val;

        /*
         * The VF uses the scatter-range queue mapping, so QBASE does
         * not need to be programmed in this register.
         */
        i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
                          I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

        /* Enable the VPLAN_QTABLE[] mapping registers */
        I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
                I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

        /* map PF queues to VF */
        for (i = 0; i < nb_qps; i++) {
                val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
                I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
        }

        /* map PF queues to VSI, two queues per QTABLE entry */
        for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
                if (2 * i > nb_qps - 1)
                        q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
                else
                        q1 = qbase + 2 * i;

                if (2 * i + 1 > nb_qps - 1)
                        q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
                else
                        q2 = qbase + 2 * i + 1;

                val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
                i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
        }
        I40E_WRITE_FLUSH(hw);

        return I40E_SUCCESS;
}
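
/*
 * Example of the mapping above (illustrative values, not from the
 * original source): with base_queue = 64 and nb_qps = 4, the first
 * loop programs VPLAN_QTABLE[0..3] with PF queues 64..67, and the
 * second packs VSILAN_QTABLE pairwise:
 *
 *   VSILAN_QTABLE[0] = (65 << QINDEX_1_SHIFT) | 64
 *   VSILAN_QTABLE[1] = (67 << QINDEX_1_SHIFT) | 66
 *
 * All remaining entries keep the QINDEX_0 mask in both halves to
 * mark those slots unused.
 */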

/**
 * Perform the VF reset operation.
 */
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
        uint32_t val, i;
        struct i40e_hw *hw;
        struct i40e_pf *pf;
        uint16_t vf_id, abs_vf_id, vf_msix_num;
        int ret;
        struct i40e_virtchnl_queue_select qsel;

        if (vf == NULL)
                return -EINVAL;

        pf = vf->pf;
        hw = I40E_PF_TO_HW(vf->pf);
        vf_id = vf->vf_idx;
        abs_vf_id = vf_id + hw->func_caps.vf_base_id;

        /* Notify the VF that a reset (VFR) is in progress */
        I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);

        /*
         * If a SW VF reset is required, a VFLR interrupt will be
         * generated and this function will be called again. To avoid
         * that, disable the interrupt first.
         */
        if (do_hw_reset) {
                vf->state = I40E_VF_INRESET;
                val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
                val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
                I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
                I40E_WRITE_FLUSH(hw);
        }

#define VFRESET_MAX_WAIT_CNT 100
        /* Wait until the VF reset is done */
        for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
                rte_delay_us(10);
                val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
                if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
                        break;
        }

        if (i >= VFRESET_MAX_WAIT_CNT) {
                PMD_DRV_LOG(ERR, "VF reset timeout");
                return -ETIMEDOUT;
        }

        /* If this is not the first reset, do the cleanup job first */
        if (vf->vsi) {
                /* Disable queues */
                memset(&qsel, 0, sizeof(qsel));
                for (i = 0; i < vf->vsi->nb_qps; i++)
                        qsel.rx_queues |= 1 << i;
                qsel.tx_queues = qsel.rx_queues;
                ret = i40e_pf_host_switch_queues(vf, &qsel, false);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Disable VF queues failed");
                        return -EFAULT;
                }

                /* Disable VF interrupt setting */
                vf_msix_num = hw->func_caps.num_msix_vectors_vf;
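                /*
                 * Register layout note: vector 0 of each VF is
                 * controlled through VFINT_DYN_CTL0(vf_id), while
                 * vectors 1..N-1 live in the flat VFINT_DYN_CTLN
                 * array, indexed as (vf_msix_num - 1) * vf_id +
                 * (vector - 1), which is what the loop below computes.
                 */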
                for (i = 0; i < vf_msix_num; i++) {
                        if (!i)
                                val = I40E_VFINT_DYN_CTL0(vf_id);
                        else
                                val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
                                                        (vf_id)) + (i - 1));
                        I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
                }
                I40E_WRITE_FLUSH(hw);

                /* remove VSI */
                ret = i40e_vsi_release(vf->vsi);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Release VSI failed");
                        return -EFAULT;
                }
        }

#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
        /* Check the pending transactions of this VF */
        /* Use the absolute VF id; refer to the datasheet for details */
        I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
                (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
        for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
                rte_delay_us(1);
                val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
                if ((val & I40E_VF_PEND_MASK) == 0)
                        break;
        }

        if (i >= VFRESET_MAX_WAIT_CNT) {
                PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
                return -ETIMEDOUT;
        }

        /* Reset done; set the COMPLETED flag and clear the reset trigger bit */
        I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
        val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
        val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
        I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
        vf->reset_cnt++;
        I40E_WRITE_FLUSH(hw);

        /* Allocate resources again */
        if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
                vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
                                         NULL, vf->vf_idx);
        } else {
                vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
                                         vf->pf->main_vsi, vf->vf_idx);
        }

        if (vf->vsi == NULL) {
                PMD_DRV_LOG(ERR, "Add VSI failed");
                return -EFAULT;
        }

        ret = i40e_pf_vf_queues_mapping(vf);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "queue mapping error");
                i40e_vsi_release(vf->vsi);
                return -EFAULT;
        }

        I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);

        return ret;
}
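
/*
 * Note: i40e_pf_host_vf_reset() is reached, for example, from
 * i40e_pf_host_init() below with do_hw_reset == 0 (the VF is already
 * held in reset), and from the OP_RESET_VF handler with
 * do_hw_reset == 1, where the PF itself triggers the SW reset through
 * VPGEN_VFRTRIG.
 */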

int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
                            uint32_t opcode,
                            uint32_t retval,
                            uint8_t *msg,
                            uint16_t msglen)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
        int ret;

        ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
                                                msg, msglen, NULL);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to send message to VF, err %u",
                             hw->aq.asq_last_status);
        }

        return ret;
}

static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
        struct i40e_virtchnl_version_info info;

        info.major = I40E_DPDK_VERSION_MAJOR;
        info.minor = I40E_DPDK_VERSION_MINOR;

        if (b_op)
                i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
                                            I40E_SUCCESS,
                                            (uint8_t *)&info,
                                            sizeof(info));
        else
                i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
                                            I40E_NOT_SUPPORTED,
                                            (uint8_t *)&info,
                                            sizeof(info));
}

static int
i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
{
        i40e_pf_host_vf_reset(vf, 1);

        /* No feedback will be sent to VF for VFLR */
        return I40E_SUCCESS;
}
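
/*
 * The VF side learns the outcome by polling its reset status register
 * (VFGEN_RSTAT, exposed to the PF as VFGEN_RSTAT1 above) until it
 * reads I40E_VFR_COMPLETED / I40E_VFR_VFACTIVE, which
 * i40e_pf_host_vf_reset() writes at the end of the flow.
 */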

static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
        struct i40e_virtchnl_vf_resource *vf_res = NULL;
        struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        uint32_t len = 0;
        int ret = I40E_SUCCESS;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(vf,
                                            I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
                                            I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        /* only have 1 VSI by default */
        len = sizeof(struct i40e_virtchnl_vf_resource) +
                I40E_DEFAULT_VF_VSI_NUM *
                sizeof(struct i40e_virtchnl_vsi_resource);

        vf_res = rte_zmalloc("i40e_vf_res", len, 0);
        if (vf_res == NULL) {
                PMD_DRV_LOG(ERR, "failed to allocate mem");
                ret = I40E_ERR_NO_MEMORY;
                vf_res = NULL;
                len = 0;
                goto send_msg;
        }

        vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
                                I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
        vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
        vf_res->num_queue_pairs = vf->vsi->nb_qps;
        vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

        /* Change the setting below if the PF host can support more VSIs per VF */
        vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
        /* As the VF is assumed to have a single VSI now, always return 0 */
        vf_res->vsi_res[0].vsi_id = 0;
        vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
        ether_addr_copy(&vf->mac_addr,
                (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
                                        ret, (uint8_t *)vf_res, len);
        rte_free(vf_res);

        return ret;
}

static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
                            struct i40e_pf_vf *vf,
                            struct i40e_virtchnl_rxq_info *rxq,
                            uint8_t crcstrip)
{
        int err = I40E_SUCCESS;
        struct i40e_hmc_obj_rxq rx_ctx;
        uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

        /* Clear the context structure first */
        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
        rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
        rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
        rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
        rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
        rx_ctx.dsize = 1;
#endif

        if (rxq->splithdr_enabled) {
                rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
                rx_ctx.dtype = i40e_header_split_enabled;
        } else {
                rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
                rx_ctx.dtype = i40e_header_split_none;
        }
        rx_ctx.rxmax = rxq->max_pkt_size;
        rx_ctx.tphrdesc_ena = 1;
        rx_ctx.tphwdesc_ena = 1;
        rx_ctx.tphdata_ena = 1;
        rx_ctx.tphhead_ena = 1;
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = crcstrip;
        rx_ctx.l2tsel = 1;
        rx_ctx.prefena = 1;

        err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
        if (err != I40E_SUCCESS)
                return err;
        err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

        return err;
}
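
/*
 * Unit note (illustrative, not from the original source): dbuff and
 * hbuff above are programmed in hardware units defined by
 * I40E_RXQ_CTX_DBUFF_SHIFT / I40E_RXQ_CTX_HBUFF_SHIFT. For instance,
 * with a shift of 7 (128-byte units) a 2048-byte data buffer yields
 * dbuff = 2048 >> 7 = 16.
 */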

static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
                            struct i40e_pf_vf *vf,
                            struct i40e_virtchnl_txq_info *txq)
{
        int err = I40E_SUCCESS;
        struct i40e_hmc_obj_txq tx_ctx;
        uint32_t qtx_ctl;
        uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;

        /* clear the context structure first */
        memset(&tx_ctx, 0, sizeof(tx_ctx));
        tx_ctx.new_context = 1;
        tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
        tx_ctx.qlen = txq->ring_len;
        tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
        err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
        if (err != I40E_SUCCESS)
                return err;

        err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
        if (err != I40E_SUCCESS)
                return err;

        /* Bind the queue with the VF function; since TX and RX queues
         * appear in pairs, only QTX_CTL needs to be set.
         */
        qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
                                ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
                                I40E_QTX_CTL_PF_INDX_MASK) |
                                (((vf->vf_idx + hw->func_caps.vf_base_id) <<
                                I40E_QTX_CTL_VFVM_INDX_SHIFT) &
                                I40E_QTX_CTL_VFVM_INDX_MASK);
        I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
        I40E_WRITE_FLUSH(hw);

        return I40E_SUCCESS;
}

static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
                                           uint8_t *msg,
                                           uint16_t msglen,
                                           bool b_op)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        struct i40e_vsi *vsi = vf->vsi;
        struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
                (struct i40e_virtchnl_vsi_queue_config_info *)msg;
        struct i40e_virtchnl_queue_pair_info *vc_qpi;
        int i, ret = I40E_SUCCESS;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(vf,
                                            I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                                            I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
                vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
                msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
                                        vc_vqci->num_queue_pairs)) {
                PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        vc_qpi = vc_vqci->qpair;
        for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
                if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
                        vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
                        ret = I40E_ERR_PARAM;
                        goto send_msg;
                }

                /*
                 * Apply the VF RX queue setting to HMC. This opcode
                 * carries no per-queue extra information, so the
                 * default CRC strip setting is used; the _EXT variant
                 * below takes it from the message instead.
                 */
                if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
                        I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
                        ret = I40E_ERR_PARAM;
                        goto send_msg;
                }

                /* Apply the VF TX queue setting to HMC */
                if (i40e_pf_host_hmc_config_txq(hw, vf,
                        &vc_qpi[i].txq) != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
                        ret = I40E_ERR_PARAM;
                        goto send_msg;
                }
        }

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                                                        ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
                                               uint8_t *msg,
                                               uint16_t msglen,
                                               bool b_op)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        struct i40e_vsi *vsi = vf->vsi;
        struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
                (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
        struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
        int i, ret = I40E_SUCCESS;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
                vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
                msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
                                        vc_vqcei->num_queue_pairs)) {
                PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        vc_qpei = vc_vqcei->qpair;
        for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
                if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
                        vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
                        ret = I40E_ERR_PARAM;
                        goto send_msg;
                }
                /*
                 * Apply the VF RX queue setting to HMC. For this
                 * extended opcode, the per-queue extra information in
                 * 'struct i40e_virtchnl_queue_pair_ext_info' supplies
                 * the CRC strip setting.
                 */
                if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
                        vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
                        ret = I40E_ERR_PARAM;
                        goto send_msg;
                }

                /* Apply the VF TX queue setting to HMC */
                if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
                                                        I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
                        ret = I40E_ERR_PARAM;
                        goto send_msg;
                }
        }

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
                                                                ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
                                        uint8_t *msg, uint16_t msglen,
                                        bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_irq_map_info *irqmap =
            (struct i40e_virtchnl_irq_map_info *)msg;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
                PMD_DRV_LOG(ERR, "buffer too short");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        /* Assume the VF only has 1 vector to bind all queues */
        if (irqmap->num_vectors != 1) {
                PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        /* This MSI-X interrupt index is relative to the VF's own range */
        vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
        vf->vsi->nb_msix = irqmap->num_vectors;
        vf->vsi->nb_used_qps = vf->vsi->nb_qps;

        /* The TX/RX queue-to-vector mapping details don't matter here;
         * link all VF RX queues to the single vector and do only the
         * mapping work. The VF can enable/disable the interrupt by
         * itself.
         */
        i40e_vsi_queues_bind_intr(vf->vsi);
send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                                                        ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
                           struct i40e_virtchnl_queue_select *qsel,
                           bool on)
{
        int ret = I40E_SUCCESS;
        int i;
        struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        uint16_t baseq = vf->vsi->base_queue;

        if (qsel->rx_queues + qsel->tx_queues == 0)
                return I40E_ERR_PARAM;

        /* Always enable RX first and disable it last */
        if (on) {
                /* Enable RX */
                for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
                        if (qsel->rx_queues & (1 << i)) {
                                ret = i40e_switch_rx_queue(hw, baseq + i, on);
                                if (ret != I40E_SUCCESS)
                                        return ret;
                        }
        }

        /* Enable/Disable TX */
        for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
                if (qsel->tx_queues & (1 << i)) {
                        ret = i40e_switch_tx_queue(hw, baseq + i, on);
                        if (ret != I40E_SUCCESS)
                                return ret;
                }

        if (!on) {
                /* Disable RX last */
                for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
                        if (qsel->rx_queues & (1 << i)) {
                                ret = i40e_switch_rx_queue(hw, baseq + i, on);
                                if (ret != I40E_SUCCESS)
                                        return ret;
                        }
        }

        return ret;
}
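
/*
 * Example (illustrative values, not from the original source): a VF
 * with 4 queue pairs enabling everything sends
 * qsel->rx_queues = qsel->tx_queues = 0xf; with base_queue = 64 the
 * loops above toggle PF queues 64..67 for that VF.
 */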

static int
i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
                                       uint8_t *msg,
                                       uint16_t msglen)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_queue_select *q_sel =
                (struct i40e_virtchnl_queue_select *)msg;

        if (msg == NULL || msglen != sizeof(*q_sel)) {
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }
        ret = i40e_pf_host_switch_queues(vf, q_sel, true);

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
                                                        ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
                                        uint8_t *msg,
                                        uint16_t msglen,
                                        bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_queue_select *q_sel =
                (struct i40e_virtchnl_queue_select *)msg;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_DISABLE_QUEUES,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen != sizeof(*q_sel)) {
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }
        ret = i40e_pf_host_switch_queues(vf, q_sel, false);

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
                                                        ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
                                           uint8_t *msg,
                                           uint16_t msglen,
                                           bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_ether_addr_list *addr_list =
                        (struct i40e_virtchnl_ether_addr_list *)msg;
        struct i40e_mac_filter_info filter;
        int i;
        struct ether_addr *mac;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        memset(&filter, 0, sizeof(struct i40e_mac_filter_info));

        if (msg == NULL || msglen <= sizeof(*addr_list)) {
                PMD_DRV_LOG(ERR, "add_ether_address argument too short");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        for (i = 0; i < addr_list->num_elements; i++) {
                mac = (struct ether_addr *)(addr_list->list[i].addr);
                (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
                filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
                if (!is_valid_assigned_ether_addr(mac) ||
                        i40e_vsi_add_mac(vf->vsi, &filter)) {
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        goto send_msg;
                }
        }

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                                                        ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
                                           uint8_t *msg,
                                           uint16_t msglen,
                                           bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_ether_addr_list *addr_list =
                (struct i40e_virtchnl_ether_addr_list *)msg;
        int i;
        struct ether_addr *mac;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen <= sizeof(*addr_list)) {
                PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        for (i = 0; i < addr_list->num_elements; i++) {
                mac = (struct ether_addr *)(addr_list->list[i].addr);
                if (!is_valid_assigned_ether_addr(mac) ||
                        i40e_vsi_delete_mac(vf->vsi, mac)) {
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        goto send_msg;
                }
        }

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                                                        ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
                                uint8_t *msg, uint16_t msglen,
                                bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
                (struct i40e_virtchnl_vlan_filter_list *)msg;
        int i;
        uint16_t *vid;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_ADD_VLAN,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
                PMD_DRV_LOG(ERR, "add_vlan argument too short");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        vid = vlan_filter_list->vlan_id;

        for (i = 0; i < vlan_filter_list->num_elements; i++) {
                ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
                if (ret != I40E_SUCCESS)
                        goto send_msg;
        }

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
                                                ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
                                  uint8_t *msg,
                                  uint16_t msglen,
                                  bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
                        (struct i40e_virtchnl_vlan_filter_list *)msg;
        int i;
        uint16_t *vid;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_DEL_VLAN,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
                PMD_DRV_LOG(ERR, "delete_vlan argument too short");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        vid = vlan_filter_list->vlan_id;
        for (i = 0; i < vlan_filter_list->num_elements; i++) {
                ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
                if (ret != I40E_SUCCESS)
                        goto send_msg;
        }

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
                                                ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_config_promisc_mode(
                                        struct i40e_pf_vf *vf,
                                        uint8_t *msg,
                                        uint16_t msglen,
                                        bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_promisc_info *promisc =
                                (struct i40e_virtchnl_promisc_info *)msg;
        struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        bool unicast = FALSE, multicast = FALSE;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen != sizeof(*promisc)) {
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
                unicast = TRUE;
        ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
                        vf->vsi->seid, unicast, NULL, true);
        if (ret != I40E_SUCCESS)
                goto send_msg;

        if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                multicast = TRUE;
        ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
                                                multicast, NULL);

send_msg:
        i40e_pf_host_send_msg_to_vf(vf,
                I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
        i40e_update_vsi_stats(vf->vsi);

        if (b_op)
                i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
                                            I40E_SUCCESS,
                                            (uint8_t *)&vf->vsi->eth_stats,
                                            sizeof(vf->vsi->eth_stats));
        else
                i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
                                            I40E_NOT_SUPPORTED,
                                            (uint8_t *)&vf->vsi->eth_stats,
                                            sizeof(vf->vsi->eth_stats));

        return I40E_SUCCESS;
}

static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
                                        struct i40e_pf_vf *vf,
                                        uint8_t *msg,
                                        uint16_t msglen,
                                        bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_vlan_offload_info *offload =
                        (struct i40e_virtchnl_vlan_offload_info *)msg;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen != sizeof(*offload)) {
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        ret = i40e_vsi_config_vlan_stripping(vf->vsi,
                                                !!offload->enable_vlan_strip);
        if (ret != 0)
                PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
                                        ret, NULL, 0);

        return ret;
}

static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
                                        uint8_t *msg,
                                        uint16_t msglen,
                                        bool b_op)
{
        int ret = I40E_SUCCESS;
        struct i40e_virtchnl_pvid_info *tpid_info =
                        (struct i40e_virtchnl_pvid_info *)msg;

        if (!b_op) {
                i40e_pf_host_send_msg_to_vf(
                        vf,
                        I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
                        I40E_NOT_SUPPORTED, NULL, 0);
                return ret;
        }

        if (msg == NULL || msglen != sizeof(*tpid_info)) {
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }

        ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);

send_msg:
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
                                        ret, NULL, 0);

        return ret;
}

void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
        struct i40e_virtchnl_pf_event event;

        event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        event.event_data.link_event.link_status =
                dev->data->dev_link.link_status;
        event.event_data.link_event.link_speed =
                (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
                I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}

void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
                           uint16_t abs_vf_id, uint32_t opcode,
                           __rte_unused uint32_t retval,
                           uint8_t *msg,
                           uint16_t msglen)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_pf_vf *vf;
        /* AdminQ passes the absolute VF id; translate to the internal VF id */
        uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
        struct rte_pmd_i40e_mb_event_param cb_param;
        bool b_op = TRUE;

        if (vf_id > pf->vf_num - 1 || !pf->vfs) {
                PMD_DRV_LOG(ERR, "invalid argument");
                return;
        }

        vf = &pf->vfs[vf_id];
        if (!vf->vsi) {
                PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
                i40e_pf_host_send_msg_to_vf(vf, opcode,
                        I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
                return;
        }

        /**
         * Initialise the structure sent to the user application;
         * the response from the user comes back in the retval field.
         */
        cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
        cb_param.vfid = vf_id;
        cb_param.msg_type = opcode;
        cb_param.msg = (void *)msg;
        cb_param.msglen = msglen;

        /**
         * Ask the user application whether we're allowed to perform
         * those functions. If cb_param.retval comes back as
         * RTE_PMD_I40E_MB_EVENT_PROCEED, proceed as usual. If it is
         * RTE_PMD_I40E_MB_EVENT_NOOP_ACK or
         * RTE_PMD_I40E_MB_EVENT_NOOP_NACK, do nothing and reply
         * NOT_SUPPORTED to the VF, since the PF must send a response
         * and ACK/NACK semantics are not defined for these opcodes.
         */
        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
        if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
                PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
                            opcode);
                b_op = FALSE;
        }

        switch (opcode) {
        case I40E_VIRTCHNL_OP_VERSION:
                PMD_DRV_LOG(INFO, "OP_VERSION received");
                i40e_pf_host_process_cmd_version(vf, b_op);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
                PMD_DRV_LOG(INFO, "OP_RESET_VF received");
                i40e_pf_host_process_cmd_reset_vf(vf);
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
                i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
                PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
                i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
                                                           msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
                PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
                i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
                                                               msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
                PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
                i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
                PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
                if (b_op) {
                        i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
                        i40e_notify_vf_link_status(dev, vf);
                } else {
                        i40e_pf_host_send_msg_to_vf(
                                vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
                                I40E_NOT_SUPPORTED, NULL, 0);
                }
                break;
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUES received");
                i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
                PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
                i40e_pf_host_process_cmd_add_ether_address(vf, msg,
                                                           msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
                PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
                i40e_pf_host_process_cmd_del_ether_address(vf, msg,
                                                           msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_ADD_VLAN:
                PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
                i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_DEL_VLAN:
                PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
                i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
                PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
                i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
                                                             msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_GET_STATS:
                PMD_DRV_LOG(INFO, "OP_GET_STATS received");
                i40e_pf_host_process_cmd_get_stats(vf, b_op);
                break;
        case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
                PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
                i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
                                                          msglen, b_op);
                break;
        case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
                PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
                i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
                break;
        /* Don't add handling for supported commands below this point;
         * unrecognised opcodes fall through to the default case, which
         * returns an error code.
         */
        default:
                PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
                i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
                                                                NULL, 0);
                break;
        }
}
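
/*
 * Application-side sketch of the RTE_ETH_EVENT_VF_MBOX hook used
 * above. This is illustrative only: the callback name is hypothetical
 * and the callback signature is assumed from the DPDK release this
 * file belongs to, so check the installed rte_ethdev.h.
 *
 *   static void
 *   vf_mbox_cb(uint8_t port_id __rte_unused,
 *              enum rte_eth_event_type type __rte_unused,
 *              void *param)
 *   {
 *           struct rte_pmd_i40e_mb_event_param *p = param;
 *
 *           // e.g. veto VF MAC filter changes, allow everything else
 *           if (p->msg_type == I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS)
 *                   p->retval = RTE_PMD_I40E_MB_EVENT_NOOP_NACK;
 *           else
 *                   p->retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
 *                                 vf_mbox_cb, NULL);
 */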

int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        int ret, i;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();

        /**
         * return if SRIOV not enabled, VF number not configured or
         * no queue assigned.
         */
        if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
                return I40E_SUCCESS;

        /* Allocate memory to store VF structure */
        pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
        if (pf->vfs == NULL)
                return -ENOMEM;

        /* Disable irq0 for VFR event */
        i40e_pf_disable_irq0(hw);

        /* Disable VF link status interrupt */
        val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
        val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
        I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
        I40E_WRITE_FLUSH(hw);

        for (i = 0; i < pf->vf_num; i++) {
                pf->vfs[i].pf = pf;
                pf->vfs[i].state = I40E_VF_INACTIVE;
                pf->vfs[i].vf_idx = i;
                ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
                if (ret != I40E_SUCCESS)
                        goto fail;
                eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
        }

        /* restore irq0 */
        i40e_pf_enable_irq0(hw);

        return I40E_SUCCESS;

fail:
        rte_free(pf->vfs);
        i40e_pf_enable_irq0(hw);

        return ret;
}

int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t val;

        PMD_INIT_FUNC_TRACE();

        /**
         * return if SRIOV not enabled, VF number not configured or
         * no queue assigned.
         */
        if ((!hw->func_caps.sr_iov_1_1) ||
                (pf->vf_num == 0) ||
                (pf->vf_nb_qps == 0))
                return I40E_SUCCESS;

        /* free memory to store VF structure */
        rte_free(pf->vfs);
        pf->vfs = NULL;

        /* Disable irq0 for VFR event */
        i40e_pf_disable_irq0(hw);

        /* Disable VF link status interrupt */
        val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
        val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
        I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
        I40E_WRITE_FLUSH(hw);

        return I40E_SUCCESS;
}