i40e: new poll mode driver
dpdk.git: lib/librte_pmd_i40e/i40e_ethdev_vf.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_dev.h>
62
63 #include "i40e_logs.h"
64 #include "i40e/i40e_prototype.h"
65 #include "i40e/i40e_adminq_cmd.h"
66 #include "i40e/i40e_type.h"
67
68 #include "i40e_rxtx.h"
69 #include "i40e_ethdev.h"
70 #include "i40e_pf.h"
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR 1
72
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT     20
77
78 struct i40evf_arq_msg_info {
79         enum i40e_virtchnl_ops ops;
80         enum i40e_status_code result;
81         uint16_t msg_len;
82         uint8_t *msg;
83 };
84
85 struct vf_cmd_info {
86         enum i40e_virtchnl_ops ops;
87         uint8_t *in_args;
88         uint32_t in_args_size;
89         uint8_t *out_buffer;
90         /* Used for both input and output: pass in the buffer size,
91          * get back the actual length of the returned data.
92          */
93         uint32_t out_size;
94 };
95
96 enum i40evf_aq_result {
97         I40EVF_MSG_ERR = -1, /* Error encountered while accessing the admin queue */
98         I40EVF_MSG_NON,      /* Nothing read from the admin queue */
99         I40EVF_MSG_SYS,      /* System message read from the admin queue */
100         I40EVF_MSG_CMD,      /* Asynchronous command result read */
101 };
102
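/*
 * Command flow between VF and PF (virtchnl over the admin queue): a command
 * is marked pending with _atomic_set_cmd(), sent to the PF with
 * i40e_aq_send_msg_to_pf(), and its reply is then polled from the receive
 * admin queue by i40evf_wait_cmd_done(). Only one command may be outstanding
 * at a time.
 */
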
103 /* A shared buffer to store the command result returned by the PF driver */
104 static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
105
106 static int i40evf_dev_configure(struct rte_eth_dev *dev);
107 static int i40evf_dev_start(struct rte_eth_dev *dev);
108 static void i40evf_dev_stop(struct rte_eth_dev *dev);
109 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
110                                 struct rte_eth_dev_info *dev_info);
111 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
112                                   __rte_unused int wait_to_complete);
113 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
114                                 struct rte_eth_stats *stats);
115 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
116                                   uint16_t vlan_id, int on);
117 static void i40evf_dev_close(struct rte_eth_dev *dev);
118 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
119 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
120 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
121 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
122 static int i40evf_get_link_status(struct rte_eth_dev *dev,
123                                   struct rte_eth_link *link);
124 static struct eth_dev_ops i40evf_eth_dev_ops = {
125         .dev_configure        = i40evf_dev_configure,
126         .dev_start            = i40evf_dev_start,
127         .dev_stop             = i40evf_dev_stop,
128         .promiscuous_enable   = i40evf_dev_promiscuous_enable,
129         .promiscuous_disable  = i40evf_dev_promiscuous_disable,
130         .allmulticast_enable  = i40evf_dev_allmulticast_enable,
131         .allmulticast_disable = i40evf_dev_allmulticast_disable,
132         .link_update          = i40evf_dev_link_update,
133         .stats_get            = i40evf_dev_stats_get,
134         .dev_close            = i40evf_dev_close,
135         .dev_infos_get        = i40evf_dev_info_get,
136         .vlan_filter_set      = i40evf_vlan_filter_set,
137         .rx_queue_setup       = i40e_dev_rx_queue_setup,
138         .rx_queue_release     = i40e_dev_rx_queue_release,
139         .tx_queue_setup       = i40e_dev_tx_queue_setup,
140         .tx_queue_release     = i40e_dev_tx_queue_release,
141 };
142
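/*
 * Note: the RX/TX queue setup and release callbacks are shared with the
 * physical-function i40e driver (see i40e_rxtx.h).
 */
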
143 static int
144 i40evf_set_mac_type(struct i40e_hw *hw)
145 {
146         int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
147
148         if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
149                 switch (hw->device_id) {
150                 case I40E_DEV_ID_VF:
151                 case I40E_DEV_ID_VF_HV:
152                         hw->mac.type = I40E_MAC_VF;
153                         status = I40E_SUCCESS;
154                         break;
155                 default:
156                         ;
157                 }
158         }
159
160         return status;
161 }
162
163 /*
164  * Parse a message received from the PF over the admin queue and fill in
165  * the ops/result info for the caller.
166  *
167  * return value:
168  *  I40EVF_MSG_SYS: a system (PF) event was read
169  *  I40EVF_MSG_CMD: an asynchronous command result was read
170  */
171 static enum i40evf_aq_result
172 i40evf_parse_pfmsg(struct i40e_vf *vf,
173                    struct i40e_arq_event_info *event,
174                    struct i40evf_arq_msg_info *data)
175 {
176         enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
177                         rte_le_to_cpu_32(event->desc.cookie_high);
178         enum i40e_status_code retval = (enum i40e_status_code)\
179                         rte_le_to_cpu_32(event->desc.cookie_low);
180         enum i40evf_aq_result ret = I40EVF_MSG_CMD;
181
182         /* pf sys event */
183         if (opcode == I40E_VIRTCHNL_OP_EVENT) {
184                 struct i40e_virtchnl_pf_event *vpe =
185                         (struct i40e_virtchnl_pf_event *)event->msg_buf;
186
187                 /* Initialize ret to sys event */
188                 ret = I40EVF_MSG_SYS;
189                 switch (vpe->event) {
190                 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
191                         vf->link_up =
192                                 vpe->event_data.link_event.link_status;
193                         vf->pend_msg |= PFMSG_LINK_CHANGE;
194                         PMD_DRV_LOG(INFO, "Link status update:%s\n",
195                                         vf->link_up ? "up" : "down");
196                         break;
197                 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
198                         vf->vf_reset = true;
199                         vf->pend_msg |= PFMSG_RESET_IMPENDING;
200                         PMD_DRV_LOG(INFO, "VF is resetting\n");
201                         break;
202                 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
203                         vf->dev_closed = true;
204                         vf->pend_msg |= PFMSG_DRIVER_CLOSE;
205                         PMD_DRV_LOG(INFO, "PF driver closed\n");
206                         break;
207                 default:
208                         PMD_DRV_LOG(ERR,
209                                 "%s: Unknown event %d from pf\n",
210                                 __func__, vpe->event);
211                 }
212         } else {
213                 /* async reply msg on command issued by vf previously */
214                 ret = I40EVF_MSG_CMD;
215                 /* Actual buffer length read from PF */
216                 data->msg_len = event->msg_size;
217         }
218         /* fill the ops and result to notify VF */
219         data->result = retval;
220         data->ops = opcode;
221
222         return ret;
223 }
224
225 /*
226  * Read the admin queue to get a message from the PF driver
227  */
228 static enum i40evf_aq_result
229 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
230 {
231         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
232         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
233         struct i40e_arq_event_info event;
234         int ret;
235         enum i40evf_aq_result result = I40EVF_MSG_NON;
236
237         event.msg_size = data->msg_len;
238         event.msg_buf = data->msg;
239         ret = i40e_clean_arq_element(hw, &event, NULL);
240         /* Can't read any msg from adminQ */
241         if (ret) {
242                 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
243                         result = I40EVF_MSG_NON;
244                 else
245                         result = I40EVF_MSG_ERR;
246                 return result;
247         }
248
249         /* Parse the event */
250         result = i40evf_parse_pfmsg(vf, &event, data);
251
252         return result;
253 }
254
255 /*
256  * Poll until a command result is returned by the PF driver or an error occurs.
257  */
258 static int
259 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
260                      struct i40evf_arq_msg_info *data)
261 {
262         int i = 0;
263         enum i40evf_aq_result ret;
264
265 #define MAX_TRY_TIMES 10
266 #define ASQ_DELAY_MS  50
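        /* Worst case this polls for MAX_TRY_TIMES * ASQ_DELAY_MS = 500 ms. */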
267         do {
268                 /* Delay some time first */
269                 rte_delay_ms(ASQ_DELAY_MS);
270                 ret = i40evf_read_pfmsg(dev, data);
271
272                 if (ret == I40EVF_MSG_CMD)
273                         return 0;
274                 else if (ret == I40EVF_MSG_ERR)
275                         return -1;
276
277                 /* Keep polling if nothing was read or only a system event arrived */
278         } while (i++ < MAX_TRY_TIMES);
279
280         return -1;
281 }
282
283 /**
284  * Clear the current command. Call this only after _atomic_set_cmd()
285  * has succeeded.
286  */
287 static inline void
288 _clear_cmd(struct i40e_vf *vf)
289 {
290         rte_wmb();
291         vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
292 }
293
294 /*
295  * Check whether a command is already pending. If not, record the new command.
296  */
297 static inline int
298 _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
299 {
300         int ret = rte_atomic32_cmpset(&vf->pend_cmd,
301                         I40E_VIRTCHNL_OP_UNKNOWN, ops);
302
303         if (!ret)
304                 PMD_DRV_LOG(ERR, "There is incomplete cmd %d\n", vf->pend_cmd);
305
306         return !ret;
307 }
308
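/*
 * Send one virtchnl command to the PF and synchronously wait for its reply.
 * Returns zero on success, non-zero on an AdminQ failure or on a non-zero
 * result code from the PF.
 */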
309 static int
310 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
311 {
312         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
313         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
314         int err = -1;
315         struct i40evf_arq_msg_info info;
316
317         if (_atomic_set_cmd(vf, args->ops))
318                 return -1;
319
320         info.msg = args->out_buffer;
321         info.msg_len = args->out_size;
322         info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
323         info.result = I40E_SUCCESS;
324
325         err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
326                      args->in_args, args->in_args_size, NULL);
327         if (err) {
328                 PMD_DRV_LOG(ERR, "fail to send cmd %d\n", args->ops);
329                 return err;
330         }
331
332         err = i40evf_wait_cmd_done(dev, &info);
333         /* a reply was read and it matches the command we sent */
334         if (!err && args->ops == info.ops)
335                 _clear_cmd(vf);
336         else if (err)
337                 PMD_DRV_LOG(ERR, "Failed to read message from AdminQ\n");
338         else if (args->ops != info.ops)
339                 PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u\n",
340                                 args->ops, info.ops);
341
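        /* Combine the local AdminQ error with the result code reported by the PF */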
342         return (err | info.result);
343 }
344
345 /*
346  * Check the API version; wait synchronously until it is read from the admin queue or an error occurs.
347  */
348 static int
349 i40evf_check_api_version(struct rte_eth_dev *dev)
350 {
351         struct i40e_virtchnl_version_info version, *pver;
352         int err;
353         struct vf_cmd_info args;
354         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
355
356         version.major = I40E_VIRTCHNL_VERSION_MAJOR;
357         version.minor = I40E_VIRTCHNL_VERSION_MINOR;
358
359         args.ops = I40E_VIRTCHNL_OP_VERSION;
360         args.in_args = (uint8_t *)&version;
361         args.in_args_size = sizeof(version);
362         args.out_buffer = cmd_result_buffer;
363         args.out_size = I40E_AQ_BUF_SZ;
364
365         err = i40evf_execute_vf_cmd(dev, &args);
366         if (err) {
367                 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION\n");
368                 return err;
369         }
370
371         pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
372         /* We are talking with DPDK host */
373         if (pver->major == I40E_DPDK_VERSION_MAJOR) {
374                 vf->host_is_dpdk = TRUE;
375                 PMD_DRV_LOG(INFO, "Detected that PF host is a DPDK app\n");
376         }
377         /* It's the Linux host driver */
378         else if ((pver->major != version.major) ||
379             (pver->minor != version.minor)) {
380                 PMD_INIT_LOG(ERR, "pf/vf API version mismatch. "
381                         "(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
382                                         version.major, version.minor);
383                 return -1;
384         }
385
386         return 0;
387 }
388
389 static int
390 i40evf_get_vf_resource(struct rte_eth_dev *dev)
391 {
392         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
393         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
394         int err;
395         struct vf_cmd_info args;
396         uint32_t len;
397
398         args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
399         args.in_args = NULL;
400         args.in_args_size = 0;
401         args.out_buffer = cmd_result_buffer;
402         args.out_size = I40E_AQ_BUF_SZ;
403
404         err = i40evf_execute_vf_cmd(dev, &args);
405
406         if (err) {
407                 PMD_DRV_LOG(ERR, "fail to execute command "
408                                         "OP_GET_VF_RESOURCE\n");
409                 return err;
410         }
411
412         len =  sizeof(struct i40e_virtchnl_vf_resource) +
413                 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
414
415         (void)rte_memcpy(vf->vf_res, args.out_buffer,
416                         RTE_MIN(args.out_size, len));
417         i40e_vf_parse_hw_config(hw, vf->vf_res);
418
419         return 0;
420 }
421
422 static int
423 i40evf_config_promisc(struct rte_eth_dev *dev,
424                       bool enable_unicast,
425                       bool enable_multicast)
426 {
427         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
428         int err;
429         struct vf_cmd_info args;
430         struct i40e_virtchnl_promisc_info promisc;
431
432         promisc.flags = 0;
433         promisc.vsi_id = vf->vsi_res->vsi_id;
434
435         if (enable_unicast)
436                 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
437
438         if (enable_multicast)
439                 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
440
441         args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
442         args.in_args = (uint8_t *)&promisc;
443         args.in_args_size = sizeof(promisc);
444         args.out_buffer = cmd_result_buffer;
445         args.out_size = I40E_AQ_BUF_SZ;
446
447         err = i40evf_execute_vf_cmd(dev, &args);
448
449         if (err)
450                 PMD_DRV_LOG(ERR, "fail to execute command "
451                                 "CONFIG_PROMISCUOUS_MODE\n");
452
453         return err;
454 }
455
456 static int
457 i40evf_configure_queues(struct rte_eth_dev *dev)
458 {
459         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
460         struct i40e_virtchnl_vsi_queue_config_info *queue_info;
461         struct i40e_virtchnl_queue_pair_info *queue_cfg;
462         struct i40e_rx_queue **rxq =
463                 (struct i40e_rx_queue **)dev->data->rx_queues;
464         struct i40e_tx_queue **txq =
465                 (struct i40e_tx_queue **)dev->data->tx_queues;
466         int i, len, nb_qpairs, num_rxq, num_txq;
467         int err;
468         struct vf_cmd_info args;
469         struct rte_pktmbuf_pool_private *mbp_priv;
470
471         nb_qpairs = vf->num_queue_pairs;
472         len = sizeof(*queue_info) + sizeof(*queue_cfg) * nb_qpairs;
473         queue_info = rte_zmalloc("queue_info", len, 0);
474         if (queue_info == NULL) {
475                 PMD_INIT_LOG(ERR, "failed alloc memory for queue_info\n");
476                 return -1;
477         }
478         queue_info->vsi_id = vf->vsi_res->vsi_id;
479         queue_info->num_queue_pairs = nb_qpairs;
480         queue_cfg = queue_info->qpair;
481
482         num_rxq = dev->data->nb_rx_queues;
483         num_txq = dev->data->nb_tx_queues;
484         /*
485          * The PF host driver requires queues to be configured in pairs,
486          * which means rxq_num should equal txq_num. Actual usage won't
487          * always work that way, so any queue that is not configured is
488          * reported with zeroed HW ring options.
489          */
490         for (i = 0; i < nb_qpairs; i++) {
491                 /* Fill TX info */
492                 queue_cfg->txq.vsi_id = queue_info->vsi_id;
493                 queue_cfg->txq.queue_id = i;
494                 if (i < num_txq) {
495                         queue_cfg->txq.ring_len = txq[i]->nb_tx_desc;
496                         queue_cfg->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
497                 } else {
498                         queue_cfg->txq.ring_len = 0;
499                         queue_cfg->txq.dma_ring_addr = 0;
500                 }
501
502                 /* Fill RX info */
503                 queue_cfg->rxq.vsi_id = queue_info->vsi_id;
504                 queue_cfg->rxq.queue_id = i;
505                 queue_cfg->rxq.max_pkt_size = vf->max_pkt_len;
506                 if (i < num_rxq) {
507                         mbp_priv = rte_mempool_get_priv(rxq[i]->mp);
508                         queue_cfg->rxq.databuffer_size = mbp_priv->mbuf_data_room_size -
509                                                    RTE_PKTMBUF_HEADROOM;
510                         queue_cfg->rxq.ring_len = rxq[i]->nb_rx_desc;
511                         queue_cfg->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
512                 } else {
513                         queue_cfg->rxq.ring_len = 0;
514                         queue_cfg->rxq.dma_ring_addr = 0;
515                         queue_cfg->rxq.databuffer_size = 0;
516                 }
517                 queue_cfg++;
518         }
519
520         args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
521         args.in_args = (u8 *)queue_info;
522         args.in_args_size = len;
523         args.out_buffer = cmd_result_buffer;
524         args.out_size = I40E_AQ_BUF_SZ;
525         err = i40evf_execute_vf_cmd(dev, &args);
526         if (err)
527                 PMD_DRV_LOG(ERR, "fail to execute command "
528                                 "OP_CONFIG_VSI_QUEUES\n");
529         rte_free(queue_info);
530
531         return err;
532 }
533
534 static int
535 i40evf_config_irq_map(struct rte_eth_dev *dev)
536 {
537         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
538         struct vf_cmd_info args;
539         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
540                 sizeof(struct i40e_virtchnl_vector_map)];
541         struct i40e_virtchnl_irq_map_info *map_info;
542         int i, err;
543         map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
544         map_info->num_vectors = 1;
545         map_info->vecmap[0].rxitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
546         map_info->vecmap[0].txitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
547         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
548         /* Always use the default dynamic MSI-X interrupt */
549         map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
550         /* Don't map any tx queue */
551         map_info->vecmap[0].txq_map = 0;
552         map_info->vecmap[0].rxq_map = 0;
553         for (i = 0; i < dev->data->nb_rx_queues; i++)
554                 map_info->vecmap[0].rxq_map |= 1 << i;
555
556         args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
557         args.in_args = (u8 *)cmd_buffer;
558         args.in_args_size = sizeof(cmd_buffer);
559         args.out_buffer = cmd_result_buffer;
560         args.out_size = I40E_AQ_BUF_SZ;
561         err = i40evf_execute_vf_cmd(dev, &args);
562         if (err)
563                 PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP\n");
564
565         return err;
566 }
567
568 static int
569 i40evf_enable_queues(struct rte_eth_dev *dev)
570 {
571         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
572         struct i40e_virtchnl_queue_select queue_select;
573         int err, i;
574         struct vf_cmd_info args;
575
576         queue_select.vsi_id = vf->vsi_res->vsi_id;
577
578         queue_select.rx_queues = 0;
579         /* Enable configured RX queues */
580         for (i = 0; i < dev->data->nb_rx_queues; i++)
581                 queue_select.rx_queues |= 1 << i;
582
583         /* Enable configured TX queues */
584         queue_select.tx_queues = 0;
585         for (i = 0; i < dev->data->nb_tx_queues; i++)
586                 queue_select.tx_queues |= 1 << i;
587
588         args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
589         args.in_args = (u8 *)&queue_select;
590         args.in_args_size = sizeof(queue_select);
591         args.out_buffer = cmd_result_buffer;
592         args.out_size = I40E_AQ_BUF_SZ;
593         err = i40evf_execute_vf_cmd(dev, &args);
594         if (err)
595                 PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES\n");
596
597         return err;
598 }
599
600 static int
601 i40evf_disable_queues(struct rte_eth_dev *dev)
602 {
603         struct i40e_virtchnl_queue_select queue_select;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
604         struct i40e_virtchnl_queue_select queue_select;
605         int err, i;
606         struct vf_cmd_info args;
607
        queue_select.vsi_id = vf->vsi_res->vsi_id;

608         /* Select the configured RX queues to disable */
609         for (i = 0; i < dev->data->nb_rx_queues; i++)
610                 queue_select.rx_queues |= 1 << i;
611
612         /* Enable configured TX queues */
613         /* Select the configured TX queues to disable */
614         for (i = 0; i < dev->data->nb_tx_queues; i++)
615                 queue_select.tx_queues |= 1 << i;
616
617         args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
618         args.in_args = (u8 *)&queue_select;
619         args.in_args_size = sizeof(queue_select);
620         args.out_buffer = cmd_result_buffer;
621         args.out_size = I40E_AQ_BUF_SZ;
622         err = i40evf_execute_vf_cmd(dev, &args);
623         if (err)
624                 PMD_DRV_LOG(ERR, "fail to execute command "
625                                         "OP_DISABLE_QUEUES\n");
626
627         return err;
628 }
629
630 static int
631 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
632 {
633         struct i40e_virtchnl_ether_addr_list *list;
634         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
635         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
636                         sizeof(struct i40e_virtchnl_ether_addr)];
637         int err;
638         struct vf_cmd_info args;
639
640         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
641                 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x\n",
642                         addr->addr_bytes[0], addr->addr_bytes[1],
643                         addr->addr_bytes[2], addr->addr_bytes[3],
644                         addr->addr_bytes[4], addr->addr_bytes[5]);
645                 return -1;
646         }
647
648         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
649         list->vsi_id = vf->vsi_res->vsi_id;
650         list->num_elements = 1;
651         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
652                                         sizeof(addr->addr_bytes));
653
654         args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
655         args.in_args = cmd_buffer;
656         args.in_args_size = sizeof(cmd_buffer);
657         args.out_buffer = cmd_result_buffer;
658         args.out_size = I40E_AQ_BUF_SZ;
659         err = i40evf_execute_vf_cmd(dev, &args);
660         if (err)
661                 PMD_DRV_LOG(ERR, "fail to execute command "
662                                 "OP_ADD_ETHER_ADDRESS\n");
663
664         return err;
665 }
666
667 static int
668 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
669 {
670         struct i40e_virtchnl_ether_addr_list *list;
671         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
672         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
673                         sizeof(struct i40e_virtchnl_ether_addr)];
674         int err;
675         struct vf_cmd_info args;
676
677         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
678                 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x\n",
679                         addr->addr_bytes[0], addr->addr_bytes[1],
680                         addr->addr_bytes[2], addr->addr_bytes[3],
681                         addr->addr_bytes[4], addr->addr_bytes[5]);
682                 return -1;
683         }
684
685         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
686         list->vsi_id = vf->vsi_res->vsi_id;
687         list->num_elements = 1;
688         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
689                         sizeof(addr->addr_bytes));
690
691         args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
692         args.in_args = cmd_buffer;
693         args.in_args_size = sizeof(cmd_buffer);
694         args.out_buffer = cmd_result_buffer;
695         args.out_size = I40E_AQ_BUF_SZ;
696         err = i40evf_execute_vf_cmd(dev, &args);
697         if (err)
698                 PMD_DRV_LOG(ERR, "fail to execute command "
699                                 "OP_DEL_ETHER_ADDRESS\n");
700
701         return err;
702 }
703
704 static int
705 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
706 {
707         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
708         struct i40e_virtchnl_queue_select q_stats;
709         struct i40e_eth_stats *pstats;
710         int err;
711         struct vf_cmd_info args;
712
713         memset(&q_stats, 0, sizeof(q_stats));
714         q_stats.vsi_id = vf->vsi_res->vsi_id;
715         args.ops = I40E_VIRTCHNL_OP_GET_STATS;
716         args.in_args = (u8 *)&q_stats;
717         args.in_args_size = sizeof(q_stats);
718         args.out_buffer = cmd_result_buffer;
719         args.out_size = I40E_AQ_BUF_SZ;
720
721         err = i40evf_execute_vf_cmd(dev, &args);
722         if (err) {
723                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS\n");
724                 return err;
725         }
726         pstats = (struct i40e_eth_stats *)args.out_buffer;
727         stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
728                                                 pstats->rx_broadcast;
729         stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
730                                                 pstats->tx_unicast;
731         stats->ierrors = pstats->rx_discards;
732         stats->oerrors = pstats->tx_errors + pstats->tx_discards;
733         stats->ibytes = pstats->rx_bytes;
734         stats->obytes = pstats->tx_bytes;
735
736         return 0;
737 }
738
739 static int
740 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
741 {
742         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
743         struct i40e_virtchnl_vlan_filter_list *vlan_list;
744         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
745                                                         sizeof(uint16_t)];
746         int err;
747         struct vf_cmd_info args;
748
749         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
750         vlan_list->vsi_id = vf->vsi_res->vsi_id;
751         vlan_list->num_elements = 1;
752         vlan_list->vlan_id[0] = vlanid;
753
754         args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
755         args.in_args = (u8 *)&cmd_buffer;
756         args.in_args_size = sizeof(cmd_buffer);
757         args.out_buffer = cmd_result_buffer;
758         args.out_size = I40E_AQ_BUF_SZ;
759         err = i40evf_execute_vf_cmd(dev, &args);
760         if (err)
761                 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN\n");
762
763         return err;
764 }
765
766 static int
767 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
768 {
769         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
770         struct i40e_virtchnl_vlan_filter_list *vlan_list;
771         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
772                                                         sizeof(uint16_t)];
773         int err;
774         struct vf_cmd_info args;
775
776         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
777         vlan_list->vsi_id = vf->vsi_res->vsi_id;
778         vlan_list->num_elements = 1;
779         vlan_list->vlan_id[0] = vlanid;
780
781         args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
782         args.in_args = (u8 *)&cmd_buffer;
783         args.in_args_size = sizeof(cmd_buffer);
784         args.out_buffer = cmd_result_buffer;
785         args.out_size = I40E_AQ_BUF_SZ;
786         err = i40evf_execute_vf_cmd(dev, &args);
787         if (err)
788                 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN\n");
789
790         return err;
791 }
792
793 static int
794 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
795 {
796         int err;
797         struct vf_cmd_info args;
798         struct rte_eth_link *new_link;
799
800         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
801         args.in_args = NULL;
802         args.in_args_size = 0;
803         args.out_buffer = cmd_result_buffer;
804         args.out_size = I40E_AQ_BUF_SZ;
805         err = i40evf_execute_vf_cmd(dev, &args);
806         if (err) {
807                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT\n");
808                 return err;
809         }
810
811         new_link = (struct rte_eth_link *)args.out_buffer;
812         (void)rte_memcpy(link, new_link, sizeof(*link));
813
814         return 0;
815 }
816
817 static struct rte_pci_id pci_id_i40evf_map[] = {
818 #define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
819 #include "rte_pci_dev_ids.h"
820 { .vendor_id = 0, /* sentinel */ },
821 };
822
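/*
 * The link status struct fits in a single 64-bit word, so it is read and
 * written with one atomic compare-and-set; readers never see a torn update.
 */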
823 static inline int
824 i40evf_dev_atomic_read_link_status(struct rte_eth_dev *dev,
825                                    struct rte_eth_link *link)
826 {
827         struct rte_eth_link *dst = link;
828         struct rte_eth_link *src = &(dev->data->dev_link);
829
830         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
831                                         *(uint64_t *)src) == 0)
832                 return -1;
833
834         return 0;
835 }
836
837 static inline int
838 i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
839                                     struct rte_eth_link *link)
840 {
841         struct rte_eth_link *dst = &(dev->data->dev_link);
842         struct rte_eth_link *src = link;
843
844         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
845                                         *(uint64_t *)src) == 0)
846                 return -1;
847
848         return 0;
849 }
850
851 static int
852 i40evf_reset_vf(struct i40e_hw *hw)
853 {
854         int i, reset;
855
856         if (i40e_vf_reset(hw) != I40E_SUCCESS) {
857                 PMD_INIT_LOG(ERR, "Reset VF NIC failed\n");
858                 return -1;
859         }
860         /**
861          * After issuing the VF reset command to the PF, the PF won't
862          * necessarily reset the VF right away; it depends on the state the
863          * PF is currently in. If it is not initialized yet, the VF reset is
864          * deferred. Otherwise the PF resets the VF, sets I40E_VFGEN_RSTAT
865          * to COMPLETED first, waits about 10 ms, and then sets it to
866          * ACTIVE. The VF may miss the short window in which COMPLETED is
867          * set, so wait a relatively long time here before polling.
868          */
869         rte_delay_ms(200);
870
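        /* On top of the initial 200 ms delay, poll for up to
         * MAX_RESET_WAIT_CNT * 50 ms = 1 second for the reset to complete.
         */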
871         for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
872                 reset = rd32(hw, I40E_VFGEN_RSTAT) &
873                         I40E_VFGEN_RSTAT_VFR_STATE_MASK;
874                 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
875                 if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
876                         break;
877                 else
878                         rte_delay_ms(50);
879         }
880
881         if (i >= MAX_RESET_WAIT_CNT) {
882                 PMD_INIT_LOG(ERR, "Reset VF NIC failed: timed out waiting for completion\n");
883                 return -1;
884         }
885
886         return 0;
887 }
888
889 static int
890 i40evf_init_vf(struct rte_eth_dev *dev)
891 {
892         int i, err, bufsz;
893         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
894         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
895
896         err = i40evf_set_mac_type(hw);
897         if (err) {
898                 PMD_INIT_LOG(ERR, "set_mac_type failed: %d\n", err);
899                 goto err;
900         }
901
902         i40e_init_adminq_parameter(hw);
903         err = i40e_init_adminq(hw);
904         if (err) {
905                 PMD_INIT_LOG(ERR, "init_adminq failed: %d\n", err);
906                 goto err;
907         }
908
909
910         /* Reset VF and wait until it's complete */
911         if (i40evf_reset_vf(hw)) {
912                 PMD_INIT_LOG(ERR, "reset NIC failed\n");
913                 goto err_aq;
914         }
915
916         /* VF reset, shutdown admin queue and initialize again */
917         if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
918                 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed\n");
919                 return -1;
920         }
921
922         i40e_init_adminq_parameter(hw);
923         if (i40e_init_adminq(hw) != I40E_SUCCESS) {
924                 PMD_INIT_LOG(ERR, "init_adminq failed\n");
925                 return -1;
926         }
927         if (i40evf_check_api_version(dev) != 0) {
928                 PMD_INIT_LOG(ERR, "check_api version failed\n");
929                 goto err_aq;
930         }
931         bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
932                 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
933         vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
934         if (!vf->vf_res) {
935                 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory\n");
936                 goto err_aq;
937         }
938
939         if (i40evf_get_vf_resource(dev) != 0) {
940                 PMD_INIT_LOG(ERR, "i40evf_get_vf_resource failed\n");
941                 goto err_alloc;
942         }
943
944         /* got VF config message back from PF, now we can parse it */
945         for (i = 0; i < vf->vf_res->num_vsis; i++) {
946                 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
947                         vf->vsi_res = &vf->vf_res->vsi_res[i];
948         }
949
950         if (!vf->vsi_res) {
951                 PMD_INIT_LOG(ERR, "no LAN VSI found\n");
952                 goto err_alloc;
953         }
954
955         vf->vsi.vsi_id = vf->vsi_res->vsi_id;
956         vf->vsi.type = vf->vsi_res->vsi_type;
957         vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
958
959         /* Check the MAC address; if it is not valid, generate one */
960         if (I40E_SUCCESS != i40e_validate_mac_addr(\
961                         vf->vsi_res->default_mac_addr))
962                 eth_random_addr(vf->vsi_res->default_mac_addr);
963
964         ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
965                                         (struct ether_addr *)hw->mac.addr);
966
967         return 0;
968
969 err_alloc:
970         rte_free(vf->vf_res);
971 err_aq:
972         i40e_shutdown_adminq(hw); /* ignore error */
973 err:
974         return -1;
975 }
976
977 static int
978 i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
979                 struct rte_eth_dev *eth_dev)
980 {
981         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
982                         eth_dev->data->dev_private);
983
984         PMD_INIT_FUNC_TRACE();
985
986         /* assign ops func pointer */
987         eth_dev->dev_ops = &i40evf_eth_dev_ops;
988         eth_dev->rx_pkt_burst = &i40e_recv_pkts;
989         eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
990
991         /*
992          * For secondary processes, we don't initialise any further as primary
993          * has already done this work.
994          */
995         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
996                 if (eth_dev->data->scattered_rx)
997                         eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
998                 return 0;
999         }
1000
1001         hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
1002         hw->device_id = eth_dev->pci_dev->id.device_id;
1003         hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
1004         hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
1005         hw->bus.device = eth_dev->pci_dev->addr.devid;
1006         hw->bus.func = eth_dev->pci_dev->addr.function;
1007         hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
1008
1009         if (i40evf_init_vf(eth_dev) != 0) {
1010                 PMD_INIT_LOG(ERR, "Init vf failed\n");
1011                 return -1;
1012         }
1013
1014         /* copy mac addr */
1015         eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
1016                                         ETHER_ADDR_LEN, 0);
1017         if (eth_dev->data->mac_addrs == NULL) {
1018                 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
1019                                 "store MAC addresses", ETHER_ADDR_LEN);
1020                 return -ENOMEM;
1021         }
1022         ether_addr_copy((struct ether_addr *)hw->mac.addr,
1023                 (struct ether_addr *)eth_dev->data->mac_addrs);
1024
1025         return 0;
1026 }
1027
1028 /*
1029  * virtual function driver struct
1030  */
1031 static struct eth_driver rte_i40evf_pmd = {
1032         {
1033                 .name = "rte_i40evf_pmd",
1034                 .id_table = pci_id_i40evf_map,
1035                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1036         },
1037         .eth_dev_init = i40evf_dev_init,
1038         .dev_private_size = sizeof(struct i40e_vf),
1039 };
1040
1041 /*
1042  * VF Driver initialization routine.
1043  * Invoked once at EAL init time.
1044  * Registers itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
1045  */
1046 static int
1047 rte_i40evf_pmd_init(const char *name __rte_unused,
1048                     const char *params __rte_unused)
1049 {
1050         DEBUGFUNC("rte_i40evf_pmd_init");
1051
1052         rte_eth_driver_register(&rte_i40evf_pmd);
1053
1054         return 0;
1055 }
1056
1057 static struct rte_driver rte_i40evf_driver = {
1058         .type = PMD_PDEV,
1059         .init = rte_i40evf_pmd_init,
1060 };
1061
1062 PMD_REGISTER_DRIVER(rte_i40evf_driver);
1063
1064 static int
1065 i40evf_dev_configure(__rte_unused struct rte_eth_dev *dev)
1066 {
1067         return 0;
1068 }
1069
1070 static int
1071 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1072 {
1073         int ret;
1074
1075         if (on)
1076                 ret = i40evf_add_vlan(dev, vlan_id);
1077         else
1078                 ret = i40evf_del_vlan(dev, vlan_id);
1079
1080         return ret;
1081 }
1082
1083 static int
1084 i40evf_rx_init(struct rte_eth_dev *dev)
1085 {
1086         uint16_t i, j;
1087         struct i40e_rx_queue **rxq =
1088                 (struct i40e_rx_queue **)dev->data->rx_queues;
1089         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1090
1091         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1092                 if (i40e_alloc_rx_queue_mbufs(rxq[i]) != 0) {
1093                         PMD_DRV_LOG(ERR, "alloc rx queues mbufs failed\n");
1094                         goto err;
1095                 }
1096                 rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
1097                 I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
1098         }
1099
1100         /* Flush the operation to write registers */
1101         I40EVF_WRITE_FLUSH(hw);
1102
1103         return 0;
1104
1105 err:
1106         /* Release all mbufs */
1107         for (j = 0; j < i; j++)
1108                 i40e_rx_queue_release_mbufs(rxq[j]);
1109
1110         return -1;
1111 }
1112
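/*
 * Only the TX tail register addresses are set up here; ring sizes and DMA
 * addresses are pushed to the PF via OP_CONFIG_VSI_QUEUES in
 * i40evf_configure_queues().
 */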
1113 static void
1114 i40evf_tx_init(struct rte_eth_dev *dev)
1115 {
1116         uint16_t i;
1117         struct i40e_tx_queue **txq =
1118                 (struct i40e_tx_queue **)dev->data->tx_queues;
1119         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1120
1121         for (i = 0; i < dev->data->nb_tx_queues; i++)
1122                 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1123 }
1124
1125 static inline void
1126 i40evf_enable_queues_intr(struct i40e_hw *hw)
1127 {
1128         I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1129                         I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1130                         I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1131 }
1132
1133 static inline void
1134 i40evf_disable_queues_intr(struct i40e_hw *hw)
1135 {
1136         I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1137                         0);
1138 }
1139
1140 static int
1141 i40evf_dev_start(struct rte_eth_dev *dev)
1142 {
1143         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1144         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1145         struct ether_addr mac_addr;
1146
1147         PMD_DRV_LOG(DEBUG, "i40evf_dev_start");
1148
1149         vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1150         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1151                 if (vf->max_pkt_len <= ETHER_MAX_LEN ||
1152                         vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
1153                         PMD_DRV_LOG(ERR, "maximum packet length must "
1154                                 "be larger than %u and smaller than %u,"
1155                                         "as jumbo frame is enabled\n",
1156                                                 (uint32_t)ETHER_MAX_LEN,
1157                                         (uint32_t)I40E_FRAME_SIZE_MAX);
1158                         return I40E_ERR_CONFIG;
1159                 }
1160         } else {
1161                 if (vf->max_pkt_len < ETHER_MIN_LEN ||
1162                         vf->max_pkt_len > ETHER_MAX_LEN) {
1163                         PMD_DRV_LOG(ERR, "maximum packet length must be "
1164                                         "larger than %u and smaller than %u, "
1165                                         "as jumbo frame is disabled\n",
1166                                                 (uint32_t)ETHER_MIN_LEN,
1167                                                 (uint32_t)ETHER_MAX_LEN);
1168                         return I40E_ERR_CONFIG;
1169                 }
1170         }
1171
1172         vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
1173                                         dev->data->nb_tx_queues);
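        /* The PF configures queues in pairs (see i40evf_configure_queues()),
         * so use the larger of the RX and TX queue counts; the missing side
         * will be reported with zero-length rings.
         */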
1174
1175         if (i40evf_rx_init(dev) != 0) {
1176                 PMD_DRV_LOG(ERR, "failed to do RX init\n");
1177                 return -1;
1178         }
1179
1180         i40evf_tx_init(dev);
1181
1182         if (i40evf_configure_queues(dev) != 0) {
1183                 PMD_DRV_LOG(ERR, "configure queues failed\n");
1184                 goto err_queue;
1185         }
1186         if (i40evf_config_irq_map(dev)) {
1187                 PMD_DRV_LOG(ERR, "config_irq_map failed\n");
1188                 goto err_queue;
1189         }
1190
1191         /* Set mac addr */
1192         (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
1193                                 sizeof(mac_addr.addr_bytes));
1194         if (i40evf_add_mac_addr(dev, &mac_addr)) {
1195                 PMD_DRV_LOG(ERR, "Failed to add mac addr\n");
1196                 goto err_queue;
1197         }
1198
1199         if (i40evf_enable_queues(dev) != 0) {
1200                 PMD_DRV_LOG(ERR, "enable queues failed\n");
1201                 goto err_mac;
1202         }
1203         i40evf_enable_queues_intr(hw);
1204         return 0;
1205
1206 err_mac:
1207         i40evf_del_mac_addr(dev, &mac_addr);
1208 err_queue:
1209         i40e_dev_clear_queues(dev);
1210         return -1;
1211 }
1212
1213 static void
1214 i40evf_dev_stop(struct rte_eth_dev *dev)
1215 {
1216         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1217
1218         PMD_INIT_FUNC_TRACE();
1219
1220         i40evf_disable_queues_intr(hw);
1221         i40evf_disable_queues(dev);
1222         i40e_dev_clear_queues(dev);
1223 }
1224
1225 static int
1226 i40evf_dev_link_update(struct rte_eth_dev *dev,
1227                        __rte_unused int wait_to_complete)
1228 {
1229         struct rte_eth_link new_link;
1230         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1231         /*
1232          * The DPDK PF host provides an interface to acquire link status,
1233          * while the Linux driver does not.
1234          */
1235         if (vf->host_is_dpdk)
1236                 i40evf_get_link_status(dev, &new_link);
1237         else {
1238                 /* Always assume it's up, for Linux driver PF host */
1239                 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1240                 new_link.link_speed  = ETH_LINK_SPEED_10000;
1241                 new_link.link_status = 1;
1242         }
1243         i40evf_dev_atomic_write_link_status(dev, &new_link);
1244
1245         return 0;
1246 }
1247
1248 static void
1249 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1250 {
1251         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1252         int ret;
1253
1254         /* If enabled, just return */
1255         if (vf->promisc_unicast_enabled)
1256                 return;
1257
1258         ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1259         if (ret == 0)
1260                 vf->promisc_unicast_enabled = TRUE;
1261 }
1262
1263 static void
1264 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1265 {
1266         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1267         int ret;
1268
1269         /* If disabled, just return */
1270         if (!vf->promisc_unicast_enabled)
1271                 return;
1272
1273         ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1274         if (ret == 0)
1275                 vf->promisc_unicast_enabled = FALSE;
1276 }
1277
1278 static void
1279 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1280 {
1281         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1282         int ret;
1283
1284         /* If enabled, just return */
1285         if (vf->promisc_multicast_enabled)
1286                 return;
1287
1288         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1289         if (ret == 0)
1290                 vf->promisc_multicast_enabled = TRUE;
1291 }
1292
1293 static void
1294 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1295 {
1296         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1297         int ret;
1298
1299         /* If disabled, just return */
1300         if (!vf->promisc_multicast_enabled)
1301                 return;
1302
1303         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
1304         if (ret == 0)
1305                 vf->promisc_multicast_enabled = FALSE;
1306 }
1307
1308 static void
1309 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1310 {
1311         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1312
1313         memset(dev_info, 0, sizeof(*dev_info));
1314         dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
1315         dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
1316         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1317         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1318 }
1319
1320 static void
1321 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1322 {
1323         memset(stats, 0, sizeof(*stats));
1324         if (i40evf_get_statics(dev, stats))
1325                 PMD_DRV_LOG(ERR, "Get statistics failed\n");
1326 }
1327
1328 static void
1329 i40evf_dev_close(struct rte_eth_dev *dev)
1330 {
1331         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1332
1333         i40evf_dev_stop(dev);
1334         i40evf_reset_vf(hw);
1335         i40e_shutdown_adminq(hw);
1336 }