i40e: add redirection table size in device info
[dpdk.git] / lib / librte_pmd_i40e / i40e_ethdev_vf.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_dev.h>
62
63 #include "i40e_logs.h"
64 #include "i40e/i40e_prototype.h"
65 #include "i40e/i40e_adminq_cmd.h"
66 #include "i40e/i40e_type.h"
67
68 #include "i40e_rxtx.h"
69 #include "i40e_ethdev.h"
70 #include "i40e_pf.h"
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR 1
72
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT     20
77
/* Bookkeeping for one message exchanged with the PF over the admin queue */
struct i40evf_arq_msg_info {
        enum i40e_virtchnl_ops ops;     /* opcode this message belongs to */
        enum i40e_status_code result;   /* status code reported by the PF */
        uint16_t buf_len;               /* capacity of 'msg' in bytes */
        uint16_t msg_len;               /* actual payload length read */
        uint8_t *msg;                   /* caller-provided receive buffer */
};
85
/* Descriptor for one synchronous VF->PF virtchnl command */
struct vf_cmd_info {
        enum i40e_virtchnl_ops ops;     /* virtchnl opcode to send */
        uint8_t *in_args;               /* request payload (may be NULL) */
        uint32_t in_args_size;          /* request payload length in bytes */
        uint8_t *out_buffer;            /* buffer receiving the PF reply */
        /* Input & output type. pass in buffer size and pass out
         * actual return result
         */
        uint32_t out_size;
};
96
/* Classification of what one admin-queue read produced */
enum i40evf_aq_result {
        I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
        I40EVF_MSG_NON,      /* Read nothing from admin queue */
        I40EVF_MSG_SYS,      /* Read system msg from admin queue */
        I40EVF_MSG_CMD,      /* Read async command result */
};
103
/* Shared buffer used to store the command result from the PF driver.
 * NOTE(review): a single global buffer implies at most one command in
 * flight at a time — presumably guaranteed by the pend_cmd CAS in
 * _atomic_set_cmd(); confirm against callers.
 */
static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
106
107 static int i40evf_dev_configure(struct rte_eth_dev *dev);
108 static int i40evf_dev_start(struct rte_eth_dev *dev);
109 static void i40evf_dev_stop(struct rte_eth_dev *dev);
110 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
111                                 struct rte_eth_dev_info *dev_info);
112 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
113                                   __rte_unused int wait_to_complete);
114 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
115                                 struct rte_eth_stats *stats);
116 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
117                                   uint16_t vlan_id, int on);
118 static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
119 static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
120                                 int on);
121 static void i40evf_dev_close(struct rte_eth_dev *dev);
122 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
123 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
124 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
125 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
126 static int i40evf_get_link_status(struct rte_eth_dev *dev,
127                                   struct rte_eth_link *link);
128 static int i40evf_init_vlan(struct rte_eth_dev *dev);
129 static int i40evf_config_rss(struct i40e_vf *vf);
130 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
131                                       struct rte_eth_rss_conf *rss_conf);
132 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
133                                         struct rte_eth_rss_conf *rss_conf);
134 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
135                                      uint16_t rx_queue_id);
136 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
137                                     uint16_t rx_queue_id);
138 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
139                                      uint16_t tx_queue_id);
140 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
141                                     uint16_t tx_queue_id);
142
143 /* Default hash key buffer for RSS */
144 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
145
/* ethdev callback table for the i40e VF driver; RX/TX queue setup and
 * release are shared with the PF driver (i40e_dev_* in i40e_rxtx.c).
 */
static struct eth_dev_ops i40evf_eth_dev_ops = {
        .dev_configure        = i40evf_dev_configure,
        .dev_start            = i40evf_dev_start,
        .dev_stop             = i40evf_dev_stop,
        .promiscuous_enable   = i40evf_dev_promiscuous_enable,
        .promiscuous_disable  = i40evf_dev_promiscuous_disable,
        .allmulticast_enable  = i40evf_dev_allmulticast_enable,
        .allmulticast_disable = i40evf_dev_allmulticast_disable,
        .link_update          = i40evf_dev_link_update,
        .stats_get            = i40evf_dev_stats_get,
        .dev_close            = i40evf_dev_close,
        .dev_infos_get        = i40evf_dev_info_get,
        .vlan_filter_set      = i40evf_vlan_filter_set,
        .vlan_offload_set     = i40evf_vlan_offload_set,
        .vlan_pvid_set        = i40evf_vlan_pvid_set,
        .rx_queue_start       = i40evf_dev_rx_queue_start,
        .rx_queue_stop        = i40evf_dev_rx_queue_stop,
        .tx_queue_start       = i40evf_dev_tx_queue_start,
        .tx_queue_stop        = i40evf_dev_tx_queue_stop,
        .rx_queue_setup       = i40e_dev_rx_queue_setup,
        .rx_queue_release     = i40e_dev_rx_queue_release,
        .tx_queue_setup       = i40e_dev_tx_queue_setup,
        .tx_queue_release     = i40e_dev_tx_queue_release,
        .rss_hash_update      = i40evf_dev_rss_hash_update,
        .rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
};
172
173 static int
174 i40evf_set_mac_type(struct i40e_hw *hw)
175 {
176         int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
177
178         if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
179                 switch (hw->device_id) {
180                 case I40E_DEV_ID_VF:
181                 case I40E_DEV_ID_VF_HV:
182                         hw->mac.type = I40E_MAC_VF;
183                         status = I40E_SUCCESS;
184                         break;
185                 default:
186                         ;
187                 }
188         }
189
190         return status;
191 }
192
/*
 * Parse one admin queue event from the PF.
 *
 * Returns I40EVF_MSG_SYS when the event is an unsolicited PF system
 * event (link change, reset, driver close), otherwise I40EVF_MSG_CMD
 * for an async reply to a command the VF issued earlier. In both cases
 * 'data' receives the opcode and status carried in the descriptor.
 */
static enum i40evf_aq_result
i40evf_parse_pfmsg(struct i40e_vf *vf,
                   struct i40e_arq_event_info *event,
                   struct i40evf_arq_msg_info *data)
{
        /* opcode and status travel in the descriptor cookies (LE on wire) */
        enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
                        rte_le_to_cpu_32(event->desc.cookie_high);
        enum i40e_status_code retval = (enum i40e_status_code)\
                        rte_le_to_cpu_32(event->desc.cookie_low);
        enum i40evf_aq_result ret = I40EVF_MSG_CMD;

        /* pf sys event */
        if (opcode == I40E_VIRTCHNL_OP_EVENT) {
                struct i40e_virtchnl_pf_event *vpe =
                        (struct i40e_virtchnl_pf_event *)event->msg_buf;

                /* Initialize ret to sys event */
                ret = I40EVF_MSG_SYS;
                switch (vpe->event) {
                case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
                        vf->link_up =
                                vpe->event_data.link_event.link_status;
                        vf->pend_msg |= PFMSG_LINK_CHANGE;
                        PMD_DRV_LOG(INFO, "Link status update:%s",
                                    vf->link_up ? "up" : "down");
                        break;
                case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
                        vf->vf_reset = true;
                        vf->pend_msg |= PFMSG_RESET_IMPENDING;
                        PMD_DRV_LOG(INFO, "vf is reseting");
                        break;
                case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
                        vf->dev_closed = true;
                        vf->pend_msg |= PFMSG_DRIVER_CLOSE;
                        PMD_DRV_LOG(INFO, "PF driver closed");
                        break;
                default:
                        PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
                                    __func__, vpe->event);
                }
        } else {
                /* async reply msg on command issued by vf previously */
                ret = I40EVF_MSG_CMD;
                /* Actual data length read from PF */
                data->msg_len = event->msg_len;
        }
        /* fill the ops and result to notify VF */
        data->result = retval;
        data->ops = opcode;

        return ret;
}
253
254 /*
255  * Read data in admin queue to get msg from pf driver
256  */
257 static enum i40evf_aq_result
258 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
259 {
260         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
261         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
262         struct i40e_arq_event_info event;
263         int ret;
264         enum i40evf_aq_result result = I40EVF_MSG_NON;
265
266         event.buf_len = data->buf_len;
267         event.msg_buf = data->msg;
268         ret = i40e_clean_arq_element(hw, &event, NULL);
269         /* Can't read any msg from adminQ */
270         if (ret) {
271                 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
272                         result = I40EVF_MSG_NON;
273                 else
274                         result = I40EVF_MSG_ERR;
275                 return result;
276         }
277
278         /* Parse the event */
279         result = i40evf_parse_pfmsg(vf, &event, data);
280
281         return result;
282 }
283
284 /*
285  * Polling read until command result return from pf driver or meet error.
286  */
287 static int
288 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
289                      struct i40evf_arq_msg_info *data)
290 {
291         int i = 0;
292         enum i40evf_aq_result ret;
293
294 #define MAX_TRY_TIMES 10
295 #define ASQ_DELAY_MS  50
296         do {
297                 /* Delay some time first */
298                 rte_delay_ms(ASQ_DELAY_MS);
299                 ret = i40evf_read_pfmsg(dev, data);
300                 if (ret == I40EVF_MSG_CMD)
301                         return 0;
302                 else if (ret == I40EVF_MSG_ERR)
303                         return -1;
304
305                 /* If don't read msg or read sys event, continue */
306         } while(i++ < MAX_TRY_TIMES);
307
308         return -1;
309 }
310
/**
 * Clear the current pending command, marking the command slot idle.
 * Only call after _atomic_set_cmd() succeeded for this command.
 */
static inline void
_clear_cmd(struct i40e_vf *vf)
{
        /* ensure all prior stores are visible before releasing the slot */
        rte_wmb();
        vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
}
321
/*
 * Check there is pending cmd in execution. If none, set new command.
 * Returns 0 on success, non-zero if another command is still pending.
 */
static inline int
_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
{
        /* CAS from OP_UNKNOWN (idle) to the new opcode; fails when busy */
        int ret = rte_atomic32_cmpset(&vf->pend_cmd,
                        I40E_VIRTCHNL_OP_UNKNOWN, ops);

        if (!ret)
                PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);

        return !ret;
}
336
337 static int
338 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
339 {
340         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
341         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
342         int err = -1;
343         struct i40evf_arq_msg_info info;
344
345         if (_atomic_set_cmd(vf, args->ops))
346                 return -1;
347
348         info.msg = args->out_buffer;
349         info.buf_len = args->out_size;
350         info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
351         info.result = I40E_SUCCESS;
352
353         err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
354                      args->in_args, args->in_args_size, NULL);
355         if (err) {
356                 PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
357                 return err;
358         }
359
360         err = i40evf_wait_cmd_done(dev, &info);
361         /* read message and it's expected one */
362         if (!err && args->ops == info.ops)
363                 _clear_cmd(vf);
364         else if (err)
365                 PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
366         else if (args->ops != info.ops)
367                 PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
368                             args->ops, info.ops);
369
370         return (err | info.result);
371 }
372
373 /*
374  * Check API version with sync wait until version read or fail from admin queue
375  */
376 static int
377 i40evf_check_api_version(struct rte_eth_dev *dev)
378 {
379         struct i40e_virtchnl_version_info version, *pver;
380         int err;
381         struct vf_cmd_info args;
382         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
383
384         version.major = I40E_VIRTCHNL_VERSION_MAJOR;
385         version.minor = I40E_VIRTCHNL_VERSION_MINOR;
386
387         args.ops = I40E_VIRTCHNL_OP_VERSION;
388         args.in_args = (uint8_t *)&version;
389         args.in_args_size = sizeof(version);
390         args.out_buffer = cmd_result_buffer;
391         args.out_size = I40E_AQ_BUF_SZ;
392
393         err = i40evf_execute_vf_cmd(dev, &args);
394         if (err) {
395                 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
396                 return err;
397         }
398
399         pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
400         vf->version_major = pver->major;
401         vf->version_minor = pver->minor;
402         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
403                 PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
404         else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
405                 (vf->version_minor == I40E_VIRTCHNL_VERSION_MINOR))
406                 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
407         else {
408                 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
409                                         vf->version_major, vf->version_minor,
410                                                 I40E_VIRTCHNL_VERSION_MAJOR,
411                                                 I40E_VIRTCHNL_VERSION_MINOR);
412                 return -1;
413         }
414
415         return 0;
416 }
417
418 static int
419 i40evf_get_vf_resource(struct rte_eth_dev *dev)
420 {
421         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
422         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
423         int err;
424         struct vf_cmd_info args;
425         uint32_t len;
426
427         args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
428         args.in_args = NULL;
429         args.in_args_size = 0;
430         args.out_buffer = cmd_result_buffer;
431         args.out_size = I40E_AQ_BUF_SZ;
432
433         err = i40evf_execute_vf_cmd(dev, &args);
434
435         if (err) {
436                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
437                 return err;
438         }
439
440         len =  sizeof(struct i40e_virtchnl_vf_resource) +
441                 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
442
443         (void)rte_memcpy(vf->vf_res, args.out_buffer,
444                         RTE_MIN(args.out_size, len));
445         i40e_vf_parse_hw_config(hw, vf->vf_res);
446
447         return 0;
448 }
449
450 static int
451 i40evf_config_promisc(struct rte_eth_dev *dev,
452                       bool enable_unicast,
453                       bool enable_multicast)
454 {
455         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
456         int err;
457         struct vf_cmd_info args;
458         struct i40e_virtchnl_promisc_info promisc;
459
460         promisc.flags = 0;
461         promisc.vsi_id = vf->vsi_res->vsi_id;
462
463         if (enable_unicast)
464                 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
465
466         if (enable_multicast)
467                 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
468
469         args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
470         args.in_args = (uint8_t *)&promisc;
471         args.in_args_size = sizeof(promisc);
472         args.out_buffer = cmd_result_buffer;
473         args.out_size = I40E_AQ_BUF_SZ;
474
475         err = i40evf_execute_vf_cmd(dev, &args);
476
477         if (err)
478                 PMD_DRV_LOG(ERR, "fail to execute command "
479                             "CONFIG_PROMISCUOUS_MODE");
480         return err;
481 }
482
483 /* Configure vlan and double vlan offload. Use flag to specify which part to configure */
484 static int
485 i40evf_config_vlan_offload(struct rte_eth_dev *dev,
486                                 bool enable_vlan_strip)
487 {
488         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
489         int err;
490         struct vf_cmd_info args;
491         struct i40e_virtchnl_vlan_offload_info offload;
492
493         offload.vsi_id = vf->vsi_res->vsi_id;
494         offload.enable_vlan_strip = enable_vlan_strip;
495
496         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
497         args.in_args = (uint8_t *)&offload;
498         args.in_args_size = sizeof(offload);
499         args.out_buffer = cmd_result_buffer;
500         args.out_size = I40E_AQ_BUF_SZ;
501
502         err = i40evf_execute_vf_cmd(dev, &args);
503         if (err)
504                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
505
506         return err;
507 }
508
509 static int
510 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
511                                 struct i40e_vsi_vlan_pvid_info *info)
512 {
513         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
514         int err;
515         struct vf_cmd_info args;
516         struct i40e_virtchnl_pvid_info tpid_info;
517
518         if (dev == NULL || info == NULL) {
519                 PMD_DRV_LOG(ERR, "invalid parameters");
520                 return I40E_ERR_PARAM;
521         }
522
523         memset(&tpid_info, 0, sizeof(tpid_info));
524         tpid_info.vsi_id = vf->vsi_res->vsi_id;
525         (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
526
527         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
528         args.in_args = (uint8_t *)&tpid_info;
529         args.in_args_size = sizeof(tpid_info);
530         args.out_buffer = cmd_result_buffer;
531         args.out_size = I40E_AQ_BUF_SZ;
532
533         err = i40evf_execute_vf_cmd(dev, &args);
534         if (err)
535                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
536
537         return err;
538 }
539
540 static void
541 i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
542                                   uint16_t vsi_id,
543                                   uint16_t queue_id,
544                                   uint16_t nb_txq,
545                                   struct i40e_tx_queue *txq)
546 {
547         txq_info->vsi_id = vsi_id;
548         txq_info->queue_id = queue_id;
549         if (queue_id < nb_txq) {
550                 txq_info->ring_len = txq->nb_tx_desc;
551                 txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
552         }
553 }
554
555 static void
556 i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
557                                   uint16_t vsi_id,
558                                   uint16_t queue_id,
559                                   uint16_t nb_rxq,
560                                   uint32_t max_pkt_size,
561                                   struct i40e_rx_queue *rxq)
562 {
563         rxq_info->vsi_id = vsi_id;
564         rxq_info->queue_id = queue_id;
565         rxq_info->max_pkt_size = max_pkt_size;
566         if (queue_id < nb_rxq) {
567                 struct rte_pktmbuf_pool_private *mbp_priv;
568
569                 rxq_info->ring_len = rxq->nb_rx_desc;
570                 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
571                 mbp_priv = rte_mempool_get_priv(rxq->mp);
572                 rxq_info->databuffer_size =
573                         mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
574         }
575 }
576
/* It configures VSI queues to co-work with Linux PF host */
static int
i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_rx_queue **rxq =
                (struct i40e_rx_queue **)dev->data->rx_queues;
        struct i40e_tx_queue **txq =
                (struct i40e_tx_queue **)dev->data->tx_queues;
        struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
        struct i40e_virtchnl_queue_pair_info *vc_qpi;
        struct vf_cmd_info args;
        uint16_t i, nb_qp = vf->num_queue_pairs;
        /* VLA sized for the config header plus one entry per queue pair */
        const uint32_t size =
                I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
        uint8_t buff[size];
        int ret;

        memset(buff, 0, sizeof(buff));
        vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
        vc_vqci->vsi_id = vf->vsi_res->vsi_id;
        vc_vqci->num_queue_pairs = nb_qp;

        /* Fill one TX and one RX descriptor per pair; the fill helpers
         * leave ring info zeroed for ids beyond the configured counts.
         */
        for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
                        vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
                        vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
                                        vf->max_pkt_len, rxq[i]);
        }
        memset(&args, 0, sizeof(args));
        args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
        args.in_args = (uint8_t *)vc_vqci;
        args.in_args_size = size;
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        ret = i40evf_execute_vf_cmd(dev, &args);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to execute command of "
                        "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");

        return ret;
}
620
/* It configures VSI queues to co-work with DPDK PF host */
static int
i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_rx_queue **rxq =
                (struct i40e_rx_queue **)dev->data->rx_queues;
        struct i40e_tx_queue **txq =
                (struct i40e_tx_queue **)dev->data->tx_queues;
        struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
        struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
        struct vf_cmd_info args;
        uint16_t i, nb_qp = vf->num_queue_pairs;
        /* VLA sized for the config header plus one extended entry per pair */
        const uint32_t size =
                I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
        uint8_t buff[size];
        int ret;

        memset(buff, 0, sizeof(buff));
        vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
        vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
        vc_vqcei->num_queue_pairs = nb_qp;
        vc_qpei = vc_vqcei->qpair;
        for (i = 0; i < nb_qp; i++, vc_qpei++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
                        vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
                        vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
                                        vf->max_pkt_len, rxq[i]);
                if (i < dev->data->nb_rx_queues)
                        /*
                         * It adds extra info for configuring VSI queues, which
                         * is needed to enable the configurable crc stripping
                         * in VF.
                         */
                        vc_qpei->rxq_ext.crcstrip =
                                dev->data->dev_conf.rxmode.hw_strip_crc;
        }
        memset(&args, 0, sizeof(args));
        /* opcode cast: OP_CONFIG_VSI_QUEUES_EXT is DPDK-PF-host specific */
        args.ops =
                (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
        args.in_args = (uint8_t *)vc_vqcei;
        args.in_args_size = size;
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        ret = i40evf_execute_vf_cmd(dev, &args);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to execute command of "
                        "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");

        return ret;
}
673
674 static int
675 i40evf_configure_queues(struct rte_eth_dev *dev)
676 {
677         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
678
679         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
680                 /* To support DPDK PF host */
681                 return i40evf_configure_vsi_queues_ext(dev);
682         else
683                 /* To support Linux PF host */
684                 return i40evf_configure_vsi_queues(dev);
685 }
686
687 static int
688 i40evf_config_irq_map(struct rte_eth_dev *dev)
689 {
690         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
691         struct vf_cmd_info args;
692         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
693                 sizeof(struct i40e_virtchnl_vector_map)];
694         struct i40e_virtchnl_irq_map_info *map_info;
695         int i, err;
696         map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
697         map_info->num_vectors = 1;
698         map_info->vecmap[0].rxitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
699         map_info->vecmap[0].txitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
700         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
701         /* Alway use default dynamic MSIX interrupt */
702         map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
703         /* Don't map any tx queue */
704         map_info->vecmap[0].txq_map = 0;
705         map_info->vecmap[0].rxq_map = 0;
706         for (i = 0; i < dev->data->nb_rx_queues; i++)
707                 map_info->vecmap[0].rxq_map |= 1 << i;
708
709         args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
710         args.in_args = (u8 *)cmd_buffer;
711         args.in_args_size = sizeof(cmd_buffer);
712         args.out_buffer = cmd_result_buffer;
713         args.out_size = I40E_AQ_BUF_SZ;
714         err = i40evf_execute_vf_cmd(dev, &args);
715         if (err)
716                 PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
717
718         return err;
719 }
720
721 static int
722 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
723                                 bool on)
724 {
725         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
726         struct i40e_virtchnl_queue_select queue_select;
727         int err;
728         struct vf_cmd_info args;
729         memset(&queue_select, 0, sizeof(queue_select));
730         queue_select.vsi_id = vf->vsi_res->vsi_id;
731
732         if (isrx)
733                 queue_select.rx_queues |= 1 << qid;
734         else
735                 queue_select.tx_queues |= 1 << qid;
736
737         if (on)
738                 args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
739         else
740                 args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
741         args.in_args = (u8 *)&queue_select;
742         args.in_args_size = sizeof(queue_select);
743         args.out_buffer = cmd_result_buffer;
744         args.out_size = I40E_AQ_BUF_SZ;
745         err = i40evf_execute_vf_cmd(dev, &args);
746         if (err)
747                 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
748                             isrx ? "RX" : "TX", qid, on ? "on" : "off");
749
750         return err;
751 }
752
753 static int
754 i40evf_start_queues(struct rte_eth_dev *dev)
755 {
756         struct rte_eth_dev_data *dev_data = dev->data;
757         int i;
758         struct i40e_rx_queue *rxq;
759         struct i40e_tx_queue *txq;
760
761         for (i = 0; i < dev->data->nb_rx_queues; i++) {
762                 rxq = dev_data->rx_queues[i];
763                 if (rxq->rx_deferred_start)
764                         continue;
765                 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
766                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
767                         return -1;
768                 }
769         }
770
771         for (i = 0; i < dev->data->nb_tx_queues; i++) {
772                 txq = dev_data->tx_queues[i];
773                 if (txq->tx_deferred_start)
774                         continue;
775                 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
776                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
777                         return -1;
778                 }
779         }
780
781         return 0;
782 }
783
784 static int
785 i40evf_stop_queues(struct rte_eth_dev *dev)
786 {
787         int i;
788
789         /* Stop TX queues first */
790         for (i = 0; i < dev->data->nb_tx_queues; i++) {
791                 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
792                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
793                         return -1;
794                 }
795         }
796
797         /* Then stop RX queues */
798         for (i = 0; i < dev->data->nb_rx_queues; i++) {
799                 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
800                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
801                         return -1;
802                 }
803         }
804
805         return 0;
806 }
807
808 static int
809 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
810 {
811         struct i40e_virtchnl_ether_addr_list *list;
812         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
813         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
814                         sizeof(struct i40e_virtchnl_ether_addr)];
815         int err;
816         struct vf_cmd_info args;
817
818         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
819                 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
820                             addr->addr_bytes[0], addr->addr_bytes[1],
821                             addr->addr_bytes[2], addr->addr_bytes[3],
822                             addr->addr_bytes[4], addr->addr_bytes[5]);
823                 return -1;
824         }
825
826         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
827         list->vsi_id = vf->vsi_res->vsi_id;
828         list->num_elements = 1;
829         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
830                                         sizeof(addr->addr_bytes));
831
832         args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
833         args.in_args = cmd_buffer;
834         args.in_args_size = sizeof(cmd_buffer);
835         args.out_buffer = cmd_result_buffer;
836         args.out_size = I40E_AQ_BUF_SZ;
837         err = i40evf_execute_vf_cmd(dev, &args);
838         if (err)
839                 PMD_DRV_LOG(ERR, "fail to execute command "
840                             "OP_ADD_ETHER_ADDRESS");
841
842         return err;
843 }
844
845 static int
846 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
847 {
848         struct i40e_virtchnl_ether_addr_list *list;
849         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
850         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
851                         sizeof(struct i40e_virtchnl_ether_addr)];
852         int err;
853         struct vf_cmd_info args;
854
855         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
856                 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
857                             addr->addr_bytes[0], addr->addr_bytes[1],
858                             addr->addr_bytes[2], addr->addr_bytes[3],
859                             addr->addr_bytes[4], addr->addr_bytes[5]);
860                 return -1;
861         }
862
863         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
864         list->vsi_id = vf->vsi_res->vsi_id;
865         list->num_elements = 1;
866         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
867                         sizeof(addr->addr_bytes));
868
869         args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
870         args.in_args = cmd_buffer;
871         args.in_args_size = sizeof(cmd_buffer);
872         args.out_buffer = cmd_result_buffer;
873         args.out_size = I40E_AQ_BUF_SZ;
874         err = i40evf_execute_vf_cmd(dev, &args);
875         if (err)
876                 PMD_DRV_LOG(ERR, "fail to execute command "
877                             "OP_DEL_ETHER_ADDRESS");
878
879         return err;
880 }
881
882 static int
883 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
884 {
885         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
886         struct i40e_virtchnl_queue_select q_stats;
887         struct i40e_eth_stats *pstats;
888         int err;
889         struct vf_cmd_info args;
890
891         memset(&q_stats, 0, sizeof(q_stats));
892         q_stats.vsi_id = vf->vsi_res->vsi_id;
893         args.ops = I40E_VIRTCHNL_OP_GET_STATS;
894         args.in_args = (u8 *)&q_stats;
895         args.in_args_size = sizeof(q_stats);
896         args.out_buffer = cmd_result_buffer;
897         args.out_size = I40E_AQ_BUF_SZ;
898
899         err = i40evf_execute_vf_cmd(dev, &args);
900         if (err) {
901                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
902                 return err;
903         }
904         pstats = (struct i40e_eth_stats *)args.out_buffer;
905         stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
906                                                 pstats->rx_broadcast;
907         stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
908                                                 pstats->tx_unicast;
909         stats->ierrors = pstats->rx_discards;
910         stats->oerrors = pstats->tx_errors + pstats->tx_discards;
911         stats->ibytes = pstats->rx_bytes;
912         stats->obytes = pstats->tx_bytes;
913
914         return 0;
915 }
916
917 static int
918 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
919 {
920         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
921         struct i40e_virtchnl_vlan_filter_list *vlan_list;
922         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
923                                                         sizeof(uint16_t)];
924         int err;
925         struct vf_cmd_info args;
926
927         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
928         vlan_list->vsi_id = vf->vsi_res->vsi_id;
929         vlan_list->num_elements = 1;
930         vlan_list->vlan_id[0] = vlanid;
931
932         args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
933         args.in_args = (u8 *)&cmd_buffer;
934         args.in_args_size = sizeof(cmd_buffer);
935         args.out_buffer = cmd_result_buffer;
936         args.out_size = I40E_AQ_BUF_SZ;
937         err = i40evf_execute_vf_cmd(dev, &args);
938         if (err)
939                 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
940
941         return err;
942 }
943
944 static int
945 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
946 {
947         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
948         struct i40e_virtchnl_vlan_filter_list *vlan_list;
949         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
950                                                         sizeof(uint16_t)];
951         int err;
952         struct vf_cmd_info args;
953
954         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
955         vlan_list->vsi_id = vf->vsi_res->vsi_id;
956         vlan_list->num_elements = 1;
957         vlan_list->vlan_id[0] = vlanid;
958
959         args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
960         args.in_args = (u8 *)&cmd_buffer;
961         args.in_args_size = sizeof(cmd_buffer);
962         args.out_buffer = cmd_result_buffer;
963         args.out_size = I40E_AQ_BUF_SZ;
964         err = i40evf_execute_vf_cmd(dev, &args);
965         if (err)
966                 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
967
968         return err;
969 }
970
971 static int
972 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
973 {
974         int err;
975         struct vf_cmd_info args;
976         struct rte_eth_link *new_link;
977
978         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
979         args.in_args = NULL;
980         args.in_args_size = 0;
981         args.out_buffer = cmd_result_buffer;
982         args.out_size = I40E_AQ_BUF_SZ;
983         err = i40evf_execute_vf_cmd(dev, &args);
984         if (err) {
985                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
986                 return err;
987         }
988
989         new_link = (struct rte_eth_link *)args.out_buffer;
990         (void)rte_memcpy(link, new_link, sizeof(*link));
991
992         return 0;
993 }
994
/* PCI ID table of supported i40e VF devices; entries are expanded from
 * rte_pci_dev_ids.h via the DECL macro, terminated by a zeroed sentinel. */
static struct rte_pci_id pci_id_i40evf_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
1000
/*
 * Atomically publish *link into dev->data->dev_link.
 * The whole rte_eth_link structure is treated as a single 64-bit word
 * and written with a compare-and-set; returns 0 on success, -1 if the
 * CAS loses a race with another writer.
 */
static inline int
i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	/* expected value is re-read from dst, so failure only occurs if a
	 * concurrent writer changes dst between the read and the cmpset */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1014
/*
 * Request a VF reset from the PF and poll I40E_VFGEN_RSTAT until the
 * reset is reported COMPLETED or the VF is ACTIVE again.
 * Returns 0 on success, -1 on request failure or poll timeout.
 */
static int
i40evf_reset_vf(struct i40e_hw *hw)
{
	int i, reset;

	if (i40e_vf_reset(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
		return -1;
	}
	/**
	  * After issuing vf reset command to pf, pf won't necessarily
	  * reset vf, it depends on what state it exactly is. If it's not
	  * initialized yet, it won't have vf reset since it's in a certain
	  * state. If not, it will try to reset. Even vf is reset, pf will
	  * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
	  * it to ACTIVE. In this duration, vf may not catch the moment that
	  * COMPLETE is set. So, for vf, we'll try to wait a long time.
	  */
	rte_delay_ms(200);

	/* poll the VFR state field, retrying every 50 ms */
	for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
		reset = rd32(hw, I40E_VFGEN_RSTAT) &
			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
		if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
			break;
		else
			rte_delay_ms(50);
	}

	if (i >= MAX_RESET_WAIT_CNT) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
		return -1;
	}

	return 0;
}
1052
1053 static int
1054 i40evf_init_vf(struct rte_eth_dev *dev)
1055 {
1056         int i, err, bufsz;
1057         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1058         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1059
1060         vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1061         vf->dev_data = dev->data;
1062         err = i40evf_set_mac_type(hw);
1063         if (err) {
1064                 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1065                 goto err;
1066         }
1067
1068         i40e_init_adminq_parameter(hw);
1069         err = i40e_init_adminq(hw);
1070         if (err) {
1071                 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1072                 goto err;
1073         }
1074
1075
1076         /* Reset VF and wait until it's complete */
1077         if (i40evf_reset_vf(hw)) {
1078                 PMD_INIT_LOG(ERR, "reset NIC failed");
1079                 goto err_aq;
1080         }
1081
1082         /* VF reset, shutdown admin queue and initialize again */
1083         if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1084                 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1085                 return -1;
1086         }
1087
1088         i40e_init_adminq_parameter(hw);
1089         if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1090                 PMD_INIT_LOG(ERR, "init_adminq failed");
1091                 return -1;
1092         }
1093         if (i40evf_check_api_version(dev) != 0) {
1094                 PMD_INIT_LOG(ERR, "check_api version failed");
1095                 goto err_aq;
1096         }
1097         bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1098                 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1099         vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1100         if (!vf->vf_res) {
1101                 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1102                         goto err_aq;
1103         }
1104
1105         if (i40evf_get_vf_resource(dev) != 0) {
1106                 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1107                 goto err_alloc;
1108         }
1109
1110         /* got VF config message back from PF, now we can parse it */
1111         for (i = 0; i < vf->vf_res->num_vsis; i++) {
1112                 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
1113                         vf->vsi_res = &vf->vf_res->vsi_res[i];
1114         }
1115
1116         if (!vf->vsi_res) {
1117                 PMD_INIT_LOG(ERR, "no LAN VSI found");
1118                 goto err_alloc;
1119         }
1120
1121         vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1122         vf->vsi.type = vf->vsi_res->vsi_type;
1123         vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1124
1125         /* check mac addr, if it's not valid, genrate one */
1126         if (I40E_SUCCESS != i40e_validate_mac_addr(\
1127                         vf->vsi_res->default_mac_addr))
1128                 eth_random_addr(vf->vsi_res->default_mac_addr);
1129
1130         ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
1131                                         (struct ether_addr *)hw->mac.addr);
1132
1133         return 0;
1134
1135 err_alloc:
1136         rte_free(vf->vf_res);
1137 err_aq:
1138         i40e_shutdown_adminq(hw); /* ignore error */
1139 err:
1140         return -1;
1141 }
1142
/*
 * Per-device init callback invoked by the ethdev layer at probe time.
 * Installs the ops/burst function pointers for all processes; in the
 * primary process it additionally mirrors the PCI identity into the
 * shared hw struct, initializes the VF and allocates the MAC table.
 * Returns 0 on success, -1 or -ENOMEM on failure.
 */
static int
i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
		struct rte_eth_dev *eth_dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
			eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		/* scattered RX requires the matching burst function */
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
		return 0;
	}

	/* mirror PCI identity and the BAR0 mapping into the hw struct
	 * used by the shared base driver code */
	hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
	hw->device_id = eth_dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
	hw->bus.device = eth_dev->pci_dev->addr.devid;
	hw->bus.func = eth_dev->pci_dev->addr.function;
	hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;

	if(i40evf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
					ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
				"store MAC addresses", ETHER_ADDR_LEN);
		return -ENOMEM;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
		(struct ether_addr *)eth_dev->data->mac_addrs);

	return 0;
}
1193
1194 /*
1195  * virtual function driver struct
1196  */
static struct eth_driver rte_i40evf_pmd = {
	{
		.name = "rte_i40evf_pmd",
		.id_table = pci_id_i40evf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = i40evf_dev_init,	/* per-device probe callback */
	.dev_private_size = sizeof(struct i40e_vf),	/* private data size */
};
1206
1207 /*
1208  * VF Driver initialization routine.
1209  * Invoked one at EAL init time.
1210  * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
1211  */
static int
rte_i40evf_pmd_init(const char *name __rte_unused,
		    const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	/* register the VF eth driver; device probing happens later during
	 * the EAL PCI scan */
	rte_eth_driver_register(&rte_i40evf_pmd);

	return 0;
}
1222
/* EAL driver record: hooks the PMD init routine into the physical
 * device (PDEV) driver list via PMD_REGISTER_DRIVER below. */
static struct rte_driver rte_i40evf_driver = {
	.type = PMD_PDEV,
	.init = rte_i40evf_pmd_init,
};
1227
1228 PMD_REGISTER_DRIVER(rte_i40evf_driver);
1229
/* ethdev configure callback: VLAN offload/PVID setup is the only
 * configuration applied at this stage. */
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
	return i40evf_init_vlan(dev);
}
1235
1236 static int
1237 i40evf_init_vlan(struct rte_eth_dev *dev)
1238 {
1239         struct rte_eth_dev_data *data = dev->data;
1240         int ret;
1241
1242         /* Apply vlan offload setting */
1243         i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1244
1245         /* Apply pvid setting */
1246         ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1247                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
1248         return ret;
1249 }
1250
1251 static void
1252 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1253 {
1254         bool enable_vlan_strip = 0;
1255         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1256         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1257
1258         /* Linux pf host doesn't support vlan offload yet */
1259         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1260                 /* Vlan stripping setting */
1261                 if (mask & ETH_VLAN_STRIP_MASK) {
1262                         /* Enable or disable VLAN stripping */
1263                         if (dev_conf->rxmode.hw_vlan_strip)
1264                                 enable_vlan_strip = 1;
1265                         else
1266                                 enable_vlan_strip = 0;
1267
1268                         i40evf_config_vlan_offload(dev, enable_vlan_strip);
1269                 }
1270         }
1271 }
1272
1273 static int
1274 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1275 {
1276         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1277         struct i40e_vsi_vlan_pvid_info info;
1278         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1279
1280         memset(&info, 0, sizeof(info));
1281         info.on = on;
1282
1283         /* Linux pf host don't support vlan offload yet */
1284         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1285                 if (info.on)
1286                         info.config.pvid = pvid;
1287                 else {
1288                         info.config.reject.tagged =
1289                                 dev_conf->txmode.hw_vlan_reject_tagged;
1290                         info.config.reject.untagged =
1291                                 dev_conf->txmode.hw_vlan_reject_untagged;
1292                 }
1293                 return i40evf_config_vlan_pvid(dev, &info);
1294         }
1295
1296         return 0;
1297 }
1298
1299 static int
1300 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1301 {
1302         struct i40e_rx_queue *rxq;
1303         int err = 0;
1304         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1305
1306         PMD_INIT_FUNC_TRACE();
1307
1308         if (rx_queue_id < dev->data->nb_rx_queues) {
1309                 rxq = dev->data->rx_queues[rx_queue_id];
1310
1311                 err = i40e_alloc_rx_queue_mbufs(rxq);
1312                 if (err) {
1313                         PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1314                         return err;
1315                 }
1316
1317                 rte_wmb();
1318
1319                 /* Init the RX tail register. */
1320                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1321                 I40EVF_WRITE_FLUSH(hw);
1322
1323                 /* Ready to switch the queue on */
1324                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1325
1326                 if (err)
1327                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1328                                     rx_queue_id);
1329         }
1330
1331         return err;
1332 }
1333
1334 static int
1335 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1336 {
1337         struct i40e_rx_queue *rxq;
1338         int err;
1339
1340         if (rx_queue_id < dev->data->nb_rx_queues) {
1341                 rxq = dev->data->rx_queues[rx_queue_id];
1342
1343                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1344
1345                 if (err) {
1346                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1347                                     rx_queue_id);
1348                         return err;
1349                 }
1350
1351                 i40e_rx_queue_release_mbufs(rxq);
1352                 i40e_reset_rx_queue(rxq);
1353         }
1354
1355         return 0;
1356 }
1357
1358 static int
1359 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1360 {
1361         int err = 0;
1362
1363         PMD_INIT_FUNC_TRACE();
1364
1365         if (tx_queue_id < dev->data->nb_tx_queues) {
1366
1367                 /* Ready to switch the queue on */
1368                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1369
1370                 if (err)
1371                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1372                                     tx_queue_id);
1373         }
1374
1375         return err;
1376 }
1377
1378 static int
1379 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1380 {
1381         struct i40e_tx_queue *txq;
1382         int err;
1383
1384         if (tx_queue_id < dev->data->nb_tx_queues) {
1385                 txq = dev->data->tx_queues[tx_queue_id];
1386
1387                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1388
1389                 if (err) {
1390                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
1391                                     tx_queue_id);
1392                         return err;
1393                 }
1394
1395                 i40e_tx_queue_release_mbufs(txq);
1396                 i40e_reset_tx_queue(txq);
1397         }
1398
1399         return 0;
1400 }
1401
1402 static int
1403 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1404 {
1405         int ret;
1406
1407         if (on)
1408                 ret = i40evf_add_vlan(dev, vlan_id);
1409         else
1410                 ret = i40evf_del_vlan(dev,vlan_id);
1411
1412         return ret;
1413 }
1414
1415 static int
1416 i40evf_rx_init(struct rte_eth_dev *dev)
1417 {
1418         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1419         uint16_t i;
1420         struct i40e_rx_queue **rxq =
1421                 (struct i40e_rx_queue **)dev->data->rx_queues;
1422         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1423
1424         i40evf_config_rss(vf);
1425         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1426                 rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
1427                 I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
1428         }
1429
1430         /* Flush the operation to write registers */
1431         I40EVF_WRITE_FLUSH(hw);
1432
1433         return 0;
1434 }
1435
1436 static void
1437 i40evf_tx_init(struct rte_eth_dev *dev)
1438 {
1439         uint16_t i;
1440         struct i40e_tx_queue **txq =
1441                 (struct i40e_tx_queue **)dev->data->tx_queues;
1442         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1443
1444         for (i = 0; i < dev->data->nb_tx_queues; i++)
1445                 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1446 }
1447
/*
 * Enable the default queue MSI-X vector: set INTENA and clear any
 * pending interrupt via CLEARPBA.
 * NOTE(review): the CLEARPBA mask uses the CTLN (not CTLN1) register
 * macro while the other mask and the register use CTLN1 — presumably
 * both expand to the same bit; confirm against the register header.
 */
static inline void
i40evf_enable_queues_intr(struct i40e_hw *hw)
{
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
}
1455
/* Mask the default queue MSI-X vector by clearing its dynamic control
 * register (INTENA and all other bits go to 0). */
static inline void
i40evf_disable_queues_intr(struct i40e_hw *hw)
{
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			0);
}
1462
/*
 * ethdev start callback: validate the max packet length against the
 * jumbo-frame setting, initialize RX/TX rings, configure queues and
 * the IRQ map on the PF, program the MAC address, start all queues and
 * enable queue interrupts.
 * Returns 0 on success, I40E_ERR_CONFIG on bad config, -1 otherwise.
 */
static int
i40evf_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ether_addr mac_addr;

	PMD_INIT_FUNC_TRACE();

	/* jumbo on: length must be in (ETHER_MAX_LEN, I40E_FRAME_SIZE_MAX];
	 * jumbo off: length must be in [ETHER_MIN_LEN, ETHER_MAX_LEN] */
	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		if (vf->max_pkt_len <= ETHER_MAX_LEN ||
			vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must "
				    "be larger than %u and smaller than %u,"
				    "as jumbo frame is enabled",
				    (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)I40E_FRAME_SIZE_MAX);
			return I40E_ERR_CONFIG;
		}
	} else {
		if (vf->max_pkt_len < ETHER_MIN_LEN ||
			vf->max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return I40E_ERR_CONFIG;
		}
	}

	/* the PF configures RX/TX as queue pairs, so take the max count */
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
					dev->data->nb_tx_queues);

	if (i40evf_rx_init(dev) != 0){
		PMD_DRV_LOG(ERR, "failed to do RX init");
		return -1;
	}

	i40evf_tx_init(dev);

	if (i40evf_configure_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}
	if (i40evf_config_irq_map(dev)) {
		PMD_DRV_LOG(ERR, "config_irq_map failed");
		goto err_queue;
	}

	/* Set mac addr */
	(void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
				sizeof(mac_addr.addr_bytes));
	if (i40evf_add_mac_addr(dev, &mac_addr)) {
		PMD_DRV_LOG(ERR, "Failed to add mac addr");
		goto err_queue;
	}

	if (i40evf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	i40evf_enable_queues_intr(hw);
	return 0;

err_mac:
	i40evf_del_mac_addr(dev, &mac_addr);
err_queue:
	return -1;
}
1535
1536 static void
1537 i40evf_dev_stop(struct rte_eth_dev *dev)
1538 {
1539         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1540
1541         PMD_INIT_FUNC_TRACE();
1542
1543         i40evf_disable_queues_intr(hw);
1544         i40evf_stop_queues(dev);
1545 }
1546
1547 static int
1548 i40evf_dev_link_update(struct rte_eth_dev *dev,
1549                        __rte_unused int wait_to_complete)
1550 {
1551         struct rte_eth_link new_link;
1552         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1553         /*
1554          * DPDK pf host provide interfacet to acquire link status
1555          * while Linux driver does not
1556          */
1557         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1558                 i40evf_get_link_status(dev, &new_link);
1559         else {
1560                 /* Always assume it's up, for Linux driver PF host */
1561                 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1562                 new_link.link_speed  = ETH_LINK_SPEED_10000;
1563                 new_link.link_status = 1;
1564         }
1565         i40evf_dev_atomic_write_link_status(dev, &new_link);
1566
1567         return 0;
1568 }
1569
1570 static void
1571 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1572 {
1573         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1574         int ret;
1575
1576         /* If enabled, just return */
1577         if (vf->promisc_unicast_enabled)
1578                 return;
1579
1580         ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1581         if (ret == 0)
1582                 vf->promisc_unicast_enabled = TRUE;
1583 }
1584
1585 static void
1586 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1587 {
1588         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1589         int ret;
1590
1591         /* If disabled, just return */
1592         if (!vf->promisc_unicast_enabled)
1593                 return;
1594
1595         ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1596         if (ret == 0)
1597                 vf->promisc_unicast_enabled = FALSE;
1598 }
1599
1600 static void
1601 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1602 {
1603         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1604         int ret;
1605
1606         /* If enabled, just return */
1607         if (vf->promisc_multicast_enabled)
1608                 return;
1609
1610         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1611         if (ret == 0)
1612                 vf->promisc_multicast_enabled = TRUE;
1613 }
1614
1615 static void
1616 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1617 {
1618         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1619         int ret;
1620
1621         /* If enabled, just return */
1622         if (!vf->promisc_multicast_enabled)
1623                 return;
1624
1625         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
1626         if (ret == 0)
1627                 vf->promisc_multicast_enabled = FALSE;
1628 }
1629
1630 static void
1631 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1632 {
1633         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1634
1635         memset(dev_info, 0, sizeof(*dev_info));
1636         dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
1637         dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
1638         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1639         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1640         dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
1641
1642         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1643                 .rx_thresh = {
1644                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
1645                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
1646                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
1647                 },
1648                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1649                 .rx_drop_en = 0,
1650         };
1651
1652         dev_info->default_txconf = (struct rte_eth_txconf) {
1653                 .tx_thresh = {
1654                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
1655                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
1656                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
1657                 },
1658                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1659                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1660                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1661                                 ETH_TXQ_FLAGS_NOOFFLOADS,
1662         };
1663 }
1664
1665 static void
1666 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1667 {
1668         memset(stats, 0, sizeof(*stats));
1669         if (i40evf_get_statics(dev, stats))
1670                 PMD_DRV_LOG(ERR, "Get statics failed");
1671 }
1672
/*
 * eth_dev callback: close the VF port.
 *
 * Order matters: stop the port first, then request a VF reset
 * (presumably handled by the PF -- see i40evf_reset_vf), and finally
 * shut down the admin queue so no further PF<->VF messages are sent.
 */
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	i40evf_dev_stop(dev);
	i40evf_reset_vf(hw);
	i40e_shutdown_adminq(hw);
}
1682
/*
 * Program the VF RSS hash key and hash-enable bits directly into the
 * VFQF_HKEY / VFQF_HENA registers.
 *
 * @hw:       hardware handle
 * @rss_conf: key (may be NULL) and rss_hf flag set to apply
 *
 * Returns 0 always.
 */
static int
i40evf_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
{
	uint32_t *hash_key;
	uint8_t hash_key_len;
	uint64_t rss_hf, hena;

	/* Write the key only when the caller supplied one long enough to
	 * fill every VFQF_HKEY register; shorter keys are ignored here.
	 */
	hash_key = (uint32_t *)(rss_conf->rss_key);
	hash_key_len = rss_conf->rss_key_len;
	if (hash_key != NULL && hash_key_len >=
		(I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		uint16_t i;

		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
	}

	/* The 64-bit hash-enable mask lives in two 32-bit registers:
	 * read both halves, replace only the RSS bits (I40E_RSS_HENA_ALL)
	 * with those derived from rss_hf, write back, then flush.
	 */
	rss_hf = rss_conf->rss_hf;
	hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf);
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
	I40EVF_WRITE_FLUSH(hw);

	return 0;
}
1711
/*
 * Disable RSS on the VF by clearing all RSS-related bits in the
 * 64-bit hash-enable mask (split across the two VFQF_HENA registers).
 * Non-RSS bits in the mask are preserved by the read-modify-write.
 */
static void
i40evf_disable_rss(struct i40e_vf *vf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	uint64_t hena;

	hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
	I40EVF_WRITE_FLUSH(hw);
}
1725
1726 static int
1727 i40evf_config_rss(struct i40e_vf *vf)
1728 {
1729         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
1730         struct rte_eth_rss_conf rss_conf;
1731         uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
1732
1733         if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
1734                 i40evf_disable_rss(vf);
1735                 PMD_DRV_LOG(DEBUG, "RSS not configured\n");
1736                 return 0;
1737         }
1738
1739         /* Fill out the look up table */
1740         for (i = 0, j = 0; i < nb_q; i++, j++) {
1741                 if (j >= vf->num_queue_pairs)
1742                         j = 0;
1743                 lut = (lut << 8) | j;
1744                 if ((i & 3) == 3)
1745                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
1746         }
1747
1748         rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
1749         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
1750                 i40evf_disable_rss(vf);
1751                 PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
1752                 return 0;
1753         }
1754
1755         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < nb_q) {
1756                 /* Calculate the default hash key */
1757                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1758                         rss_key_default[i] = (uint32_t)rte_rand();
1759                 rss_conf.rss_key = (uint8_t *)rss_key_default;
1760                 rss_conf.rss_key_len = nb_q;
1761         }
1762
1763         return i40evf_hw_rss_hash_set(hw, &rss_conf);
1764 }
1765
1766 static int
1767 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
1768                            struct rte_eth_rss_conf *rss_conf)
1769 {
1770         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1771         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
1772         uint64_t hena;
1773
1774         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1775         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1776         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
1777                 if (rss_hf != 0) /* Enable RSS */
1778                         return -EINVAL;
1779                 return 0;
1780         }
1781
1782         /* RSS enabled */
1783         if (rss_hf == 0) /* Disable RSS */
1784                 return -EINVAL;
1785
1786         return i40evf_hw_rss_hash_set(hw, rss_conf);
1787 }
1788
1789 static int
1790 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1791                              struct rte_eth_rss_conf *rss_conf)
1792 {
1793         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1794         uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
1795         uint64_t hena;
1796         uint16_t i;
1797
1798         if (hash_key) {
1799                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1800                         hash_key[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
1801                 rss_conf->rss_key_len = i * sizeof(uint32_t);
1802         }
1803         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1804         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1805         rss_conf->rss_hf = i40e_parse_hena(hena);
1806
1807         return 0;
1808 }