remove extra parentheses in return statement
[dpdk.git] / drivers / net / i40e / i40e_ethdev_vf.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_atomic.h>
59 #include <rte_malloc.h>
60 #include <rte_dev.h>
61
62 #include "i40e_logs.h"
63 #include "base/i40e_prototype.h"
64 #include "base/i40e_adminq_cmd.h"
65 #include "base/i40e_type.h"
66
67 #include "i40e_rxtx.h"
68 #include "i40e_ethdev.h"
69 #include "i40e_pf.h"
70 #define I40EVF_VSI_DEFAULT_MSIX_INTR     1
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
72
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT     20
77 /* ITR index for NOITR */
78 #define I40E_QINT_RQCTL_MSIX_INDX_NOITR     3
79
80 struct i40evf_arq_msg_info {
81         enum i40e_virtchnl_ops ops;
82         enum i40e_status_code result;
83         uint16_t buf_len;
84         uint16_t msg_len;
85         uint8_t *msg;
86 };
87
88 struct vf_cmd_info {
89         enum i40e_virtchnl_ops ops;
90         uint8_t *in_args;
91         uint32_t in_args_size;
92         uint8_t *out_buffer;
93         /* Input & output: pass in the buffer size; pass out the
94          * actual length of the returned result
95          */
96         uint32_t out_size;
97 };
98
99 enum i40evf_aq_result {
100         I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
101         I40EVF_MSG_NON,      /* Read nothing from admin queue */
102         I40EVF_MSG_SYS,      /* Read system msg from admin queue */
103         I40EVF_MSG_CMD,      /* Read async command result */
104 };
105
106 /* A shared buffer to store the command result from the PF driver */
107 static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
108
109 static int i40evf_dev_configure(struct rte_eth_dev *dev);
110 static int i40evf_dev_start(struct rte_eth_dev *dev);
111 static void i40evf_dev_stop(struct rte_eth_dev *dev);
112 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
113                                 struct rte_eth_dev_info *dev_info);
114 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
115                                   __rte_unused int wait_to_complete);
116 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
117                                 struct rte_eth_stats *stats);
118 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
119                                  struct rte_eth_xstats *xstats, unsigned n);
120 static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
121 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
122                                   uint16_t vlan_id, int on);
123 static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
124 static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
125                                 int on);
126 static void i40evf_dev_close(struct rte_eth_dev *dev);
127 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
128 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
129 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
130 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
131 static int i40evf_get_link_status(struct rte_eth_dev *dev,
132                                   struct rte_eth_link *link);
133 static int i40evf_init_vlan(struct rte_eth_dev *dev);
134 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
135                                      uint16_t rx_queue_id);
136 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
137                                     uint16_t rx_queue_id);
138 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
139                                      uint16_t tx_queue_id);
140 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
141                                     uint16_t tx_queue_id);
142 static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
143                         struct rte_eth_rss_reta_entry64 *reta_conf,
144                         uint16_t reta_size);
145 static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
146                         struct rte_eth_rss_reta_entry64 *reta_conf,
147                         uint16_t reta_size);
148 static int i40evf_config_rss(struct i40e_vf *vf);
149 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
150                                       struct rte_eth_rss_conf *rss_conf);
151 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
152                                         struct rte_eth_rss_conf *rss_conf);
153 static int
154 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
155 static int
156 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
157
158 /* Default hash key buffer for RSS */
159 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
160
161 struct rte_i40evf_xstats_name_off {
162         char name[RTE_ETH_XSTATS_NAME_SIZE];
163         unsigned offset;
164 };
165
166 static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
167         {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
168         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
169         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
170         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
171         {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
172         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
173                 rx_unknown_protocol)},
174         {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
175         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
176         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
177         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
178         {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
179         {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
180 };
181
182 #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
183                 sizeof(rte_i40evf_stats_strings[0]))
184
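/*
 * Editor's note: the table above has 12 entries, so I40EVF_NB_XSTATS
 * evaluates to 12; the xstats_get callback below uses it to size its loop
 * and to report the required array length to callers.
 */
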
185 static const struct eth_dev_ops i40evf_eth_dev_ops = {
186         .dev_configure        = i40evf_dev_configure,
187         .dev_start            = i40evf_dev_start,
188         .dev_stop             = i40evf_dev_stop,
189         .promiscuous_enable   = i40evf_dev_promiscuous_enable,
190         .promiscuous_disable  = i40evf_dev_promiscuous_disable,
191         .allmulticast_enable  = i40evf_dev_allmulticast_enable,
192         .allmulticast_disable = i40evf_dev_allmulticast_disable,
193         .link_update          = i40evf_dev_link_update,
194         .stats_get            = i40evf_dev_stats_get,
195         .xstats_get           = i40evf_dev_xstats_get,
196         .xstats_reset         = i40evf_dev_xstats_reset,
197         .dev_close            = i40evf_dev_close,
198         .dev_infos_get        = i40evf_dev_info_get,
199         .vlan_filter_set      = i40evf_vlan_filter_set,
200         .vlan_offload_set     = i40evf_vlan_offload_set,
201         .vlan_pvid_set        = i40evf_vlan_pvid_set,
202         .rx_queue_start       = i40evf_dev_rx_queue_start,
203         .rx_queue_stop        = i40evf_dev_rx_queue_stop,
204         .tx_queue_start       = i40evf_dev_tx_queue_start,
205         .tx_queue_stop        = i40evf_dev_tx_queue_stop,
206         .rx_queue_setup       = i40e_dev_rx_queue_setup,
207         .rx_queue_release     = i40e_dev_rx_queue_release,
208         .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
209         .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
210         .rx_descriptor_done   = i40e_dev_rx_descriptor_done,
211         .tx_queue_setup       = i40e_dev_tx_queue_setup,
212         .tx_queue_release     = i40e_dev_tx_queue_release,
213         .reta_update          = i40evf_dev_rss_reta_update,
214         .reta_query           = i40evf_dev_rss_reta_query,
215         .rss_hash_update      = i40evf_dev_rss_hash_update,
216         .rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
217 };
218
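/*
 * Editor's note: the ethdev layer dispatches through this ops table; for
 * example, an application call to rte_eth_promiscuous_enable(port_id)
 * ends up invoking the i40evf_dev_promiscuous_enable() registered above.
 */
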
219 static int
220 i40evf_set_mac_type(struct i40e_hw *hw)
221 {
222         int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
223
224         if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
225                 switch (hw->device_id) {
226                 case I40E_DEV_ID_VF:
227                 case I40E_DEV_ID_VF_HV:
228                         hw->mac.type = I40E_MAC_VF;
229                         status = I40E_SUCCESS;
230                         break;
231                 default:
232                         ;
233                 }
234         }
235
236         return status;
237 }
238
239 /*
240  * Parse admin queue message.
241  *
242  * Return value:
243  *  I40EVF_MSG_SYS: a PF system event was parsed from the message
244  *  I40EVF_MSG_CMD: an async command result was parsed; data->msg_len
245  *  holds the actual length read from the PF
246  */
247 static enum i40evf_aq_result
248 i40evf_parse_pfmsg(struct i40e_vf *vf,
249                    struct i40e_arq_event_info *event,
250                    struct i40evf_arq_msg_info *data)
251 {
252         enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
253                         rte_le_to_cpu_32(event->desc.cookie_high);
254         enum i40e_status_code retval = (enum i40e_status_code)\
255                         rte_le_to_cpu_32(event->desc.cookie_low);
256         enum i40evf_aq_result ret = I40EVF_MSG_CMD;
257
258         /* pf sys event */
259         if (opcode == I40E_VIRTCHNL_OP_EVENT) {
260                 struct i40e_virtchnl_pf_event *vpe =
261                         (struct i40e_virtchnl_pf_event *)event->msg_buf;
262
263                 /* Initialize ret to sys event */
264                 ret = I40EVF_MSG_SYS;
265                 switch (vpe->event) {
266                 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
267                         vf->link_up =
268                                 vpe->event_data.link_event.link_status;
269                         vf->pend_msg |= PFMSG_LINK_CHANGE;
270                         PMD_DRV_LOG(INFO, "Link status update:%s",
271                                     vf->link_up ? "up" : "down");
272                         break;
273                 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
274                         vf->vf_reset = true;
275                         vf->pend_msg |= PFMSG_RESET_IMPENDING;
276                         PMD_DRV_LOG(INFO, "VF is resetting");
277                         break;
278                 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
279                         vf->dev_closed = true;
280                         vf->pend_msg |= PFMSG_DRIVER_CLOSE;
281                         PMD_DRV_LOG(INFO, "PF driver closed");
282                         break;
283                 default:
284                         PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
285                                     __func__, vpe->event);
286                 }
287         } else {
288                 /* async reply msg on command issued by vf previously */
289                 ret = I40EVF_MSG_CMD;
290                 /* Actual data length read from PF */
291                 data->msg_len = event->msg_len;
292         }
293         /* fill the ops and result to notify VF */
294         data->result = retval;
295         data->ops = opcode;
296
297         return ret;
298 }
299
300 /*
301  * Read the admin queue to get a message sent by the PF driver
302  */
303 static enum i40evf_aq_result
304 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
305 {
306         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
307         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
308         struct i40e_arq_event_info event;
309         int ret;
310         enum i40evf_aq_result result = I40EVF_MSG_NON;
311
312         event.buf_len = data->buf_len;
313         event.msg_buf = data->msg;
314         ret = i40e_clean_arq_element(hw, &event, NULL);
315         /* Can't read any msg from adminQ */
316         if (ret) {
317                 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
318                         result = I40EVF_MSG_NON;
319                 else
320                         result = I40EVF_MSG_ERR;
321                 return result;
322         }
323
324         /* Parse the event */
325         result = i40evf_parse_pfmsg(vf, &event, data);
326
327         return result;
328 }
329
330 /*
331  * Poll until a command result returns from the PF driver or an error occurs.
332  */
333 static int
334 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
335                      struct i40evf_arq_msg_info *data)
336 {
337         int i = 0;
338         enum i40evf_aq_result ret;
339
340 #define MAX_TRY_TIMES 20
341 #define ASQ_DELAY_MS  100
342         do {
343                 /* Delay some time first */
344                 rte_delay_ms(ASQ_DELAY_MS);
345                 ret = i40evf_read_pfmsg(dev, data);
346                 if (ret == I40EVF_MSG_CMD)
347                         return 0;
348                 else if (ret == I40EVF_MSG_ERR)
349                         return -1;
350
351                 /* If no msg was read, or a sys event was read, keep trying */
352         } while (i++ < MAX_TRY_TIMES);
353
354         return -1;
355 }
356
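/*
 * Editor's note: the loop above sleeps before each read, so the worst case
 * is (MAX_TRY_TIMES + 1) * ASQ_DELAY_MS = 21 * 100 ms = 2.1 s before the
 * wait gives up and returns -1.
 */
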
357 /**
358  * Clear the current command. Only call after _atomic_set_cmd
359  * has completed successfully.
360  */
361 static inline void
362 _clear_cmd(struct i40e_vf *vf)
363 {
364         rte_wmb();
365         vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
366 }
367
368 /*
369  * Check whether a command is pending. If none, set the new command.
370  */
371 static inline int
372 _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
373 {
374         int ret = rte_atomic32_cmpset(&vf->pend_cmd,
375                         I40E_VIRTCHNL_OP_UNKNOWN, ops);
376
377         if (!ret)
378                 PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
379
380         return !ret;
381 }
382
383 static int
384 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
385 {
386         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
387         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
388         int err = -1;
389         struct i40evf_arq_msg_info info;
390
391         if (_atomic_set_cmd(vf, args->ops))
392                 return -1;
393
394         info.msg = args->out_buffer;
395         info.buf_len = args->out_size;
396         info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
397         info.result = I40E_SUCCESS;
398
399         err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
400                      args->in_args, args->in_args_size, NULL);
401         if (err) {
402                 PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
403                 _clear_cmd(vf);
404                 return err;
405         }
406
407         err = i40evf_wait_cmd_done(dev, &info);
408         /* a message was read and it is the expected one */
409         if (!err && args->ops == info.ops)
410                 _clear_cmd(vf);
411         else if (err) {
412                 PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
413                 _clear_cmd(vf);
414         }
415         else if (args->ops != info.ops)
416                 PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
417                             args->ops, info.ops);
418
419         return err | info.result;
420 }
421
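/*
 * Editor's sketch (illustrative, not part of the original file): every
 * virtchnl request in this file follows the same vf_cmd_info pattern.
 * A minimal caller looks like the hypothetical helper below; the op and
 * payload are examples only.
 */
static int __rte_unused
example_request_stats(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_virtchnl_queue_select qs;
        struct vf_cmd_info args;

        memset(&qs, 0, sizeof(qs));
        qs.vsi_id = vf->vsi_res->vsi_id;
        args.ops = I40E_VIRTCHNL_OP_GET_STATS;  /* request to send */
        args.in_args = (uint8_t *)&qs;          /* request payload */
        args.in_args_size = sizeof(qs);
        args.out_buffer = cmd_result_buffer;    /* shared reply buffer */
        args.out_size = I40E_AQ_BUF_SZ;         /* reply buffer size */

        /* Nonzero means either a transport error or a PF error code,
         * since the return value OR-combines err with info.result. */
        return i40evf_execute_vf_cmd(dev, &args);
}
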
422 /*
423  * Check the API version, waiting until it is read from the admin queue or fails
424  */
425 static int
426 i40evf_check_api_version(struct rte_eth_dev *dev)
427 {
428         struct i40e_virtchnl_version_info version, *pver;
429         int err;
430         struct vf_cmd_info args;
431         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
432
433         version.major = I40E_VIRTCHNL_VERSION_MAJOR;
434         version.minor = I40E_VIRTCHNL_VERSION_MINOR;
435
436         args.ops = I40E_VIRTCHNL_OP_VERSION;
437         args.in_args = (uint8_t *)&version;
438         args.in_args_size = sizeof(version);
439         args.out_buffer = cmd_result_buffer;
440         args.out_size = I40E_AQ_BUF_SZ;
441
442         err = i40evf_execute_vf_cmd(dev, &args);
443         if (err) {
444                 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
445                 return err;
446         }
447
448         pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
449         vf->version_major = pver->major;
450         vf->version_minor = pver->minor;
451         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
452                 PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
453         else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
454                 (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
455                 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
456         else {
457                 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
458                                         vf->version_major, vf->version_minor,
459                                                 I40E_VIRTCHNL_VERSION_MAJOR,
460                                                 I40E_VIRTCHNL_VERSION_MINOR);
461                 return -1;
462         }
463
464         return 0;
465 }
466
467 static int
468 i40evf_get_vf_resource(struct rte_eth_dev *dev)
469 {
470         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
471         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
472         int err;
473         struct vf_cmd_info args;
474         uint32_t caps, len;
475
476         args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
477         args.out_buffer = cmd_result_buffer;
478         args.out_size = I40E_AQ_BUF_SZ;
479         if (PF_IS_V11(vf)) {
480                 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
481                        I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
482                        I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
483                        I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
484                        I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
485                 args.in_args = (uint8_t *)&caps;
486                 args.in_args_size = sizeof(caps);
487         } else {
488                 args.in_args = NULL;
489                 args.in_args_size = 0;
490         }
491         err = i40evf_execute_vf_cmd(dev, &args);
492
493         if (err) {
494                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
495                 return err;
496         }
497
498         len = sizeof(struct i40e_virtchnl_vf_resource) +
499                 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
500
501         (void)rte_memcpy(vf->vf_res, args.out_buffer,
502                         RTE_MIN(args.out_size, len));
503         i40e_vf_parse_hw_config(hw, vf->vf_res);
504
505         return 0;
506 }
507
508 static int
509 i40evf_config_promisc(struct rte_eth_dev *dev,
510                       bool enable_unicast,
511                       bool enable_multicast)
512 {
513         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
514         int err;
515         struct vf_cmd_info args;
516         struct i40e_virtchnl_promisc_info promisc;
517
518         promisc.flags = 0;
519         promisc.vsi_id = vf->vsi_res->vsi_id;
520
521         if (enable_unicast)
522                 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
523
524         if (enable_multicast)
525                 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
526
527         args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
528         args.in_args = (uint8_t *)&promisc;
529         args.in_args_size = sizeof(promisc);
530         args.out_buffer = cmd_result_buffer;
531         args.out_size = I40E_AQ_BUF_SZ;
532
533         err = i40evf_execute_vf_cmd(dev, &args);
534
535         if (err)
536                 PMD_DRV_LOG(ERR, "fail to execute command "
537                             "CONFIG_PROMISCUOUS_MODE");
538         return err;
539 }
540
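/*
 * Editor's sketch: the promiscuous dev ops declared near the top of this
 * file (and defined later in it) can be thin wrappers over the helper
 * above; this illustrative version enables unicast promisc only.
 */
static void __rte_unused
example_promiscuous_enable(struct rte_eth_dev *dev)
{
        /* Toggle unicast promisc, leave multicast (allmulti) disabled */
        if (i40evf_config_promisc(dev, true, false) != 0)
                PMD_DRV_LOG(ERR, "example: enabling promisc mode failed");
}
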
541 /* Configure VLAN stripping offload. The flag specifies whether stripping is enabled or disabled */
542 static int
543 i40evf_config_vlan_offload(struct rte_eth_dev *dev,
544                                 bool enable_vlan_strip)
545 {
546         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
547         int err;
548         struct vf_cmd_info args;
549         struct i40e_virtchnl_vlan_offload_info offload;
550
551         offload.vsi_id = vf->vsi_res->vsi_id;
552         offload.enable_vlan_strip = enable_vlan_strip;
553
554         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
555         args.in_args = (uint8_t *)&offload;
556         args.in_args_size = sizeof(offload);
557         args.out_buffer = cmd_result_buffer;
558         args.out_size = I40E_AQ_BUF_SZ;
559
560         err = i40evf_execute_vf_cmd(dev, &args);
561         if (err)
562                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
563
564         return err;
565 }
566
567 static int
568 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
569                                 struct i40e_vsi_vlan_pvid_info *info)
570 {
571         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
572         int err;
573         struct vf_cmd_info args;
574         struct i40e_virtchnl_pvid_info tpid_info;
575
576         if (dev == NULL || info == NULL) {
577                 PMD_DRV_LOG(ERR, "invalid parameters");
578                 return I40E_ERR_PARAM;
579         }
580
581         memset(&tpid_info, 0, sizeof(tpid_info));
582         tpid_info.vsi_id = vf->vsi_res->vsi_id;
583         (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
584
585         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
586         args.in_args = (uint8_t *)&tpid_info;
587         args.in_args_size = sizeof(tpid_info);
588         args.out_buffer = cmd_result_buffer;
589         args.out_size = I40E_AQ_BUF_SZ;
590
591         err = i40evf_execute_vf_cmd(dev, &args);
592         if (err)
593                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
594
595         return err;
596 }
597
598 static void
599 i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
600                                   uint16_t vsi_id,
601                                   uint16_t queue_id,
602                                   uint16_t nb_txq,
603                                   struct i40e_tx_queue *txq)
604 {
605         txq_info->vsi_id = vsi_id;
606         txq_info->queue_id = queue_id;
607         if (queue_id < nb_txq) {
608                 txq_info->ring_len = txq->nb_tx_desc;
609                 txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
610         }
611 }
612
613 static void
614 i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
615                                   uint16_t vsi_id,
616                                   uint16_t queue_id,
617                                   uint16_t nb_rxq,
618                                   uint32_t max_pkt_size,
619                                   struct i40e_rx_queue *rxq)
620 {
621         rxq_info->vsi_id = vsi_id;
622         rxq_info->queue_id = queue_id;
623         rxq_info->max_pkt_size = max_pkt_size;
624         if (queue_id < nb_rxq) {
625                 rxq_info->ring_len = rxq->nb_rx_desc;
626                 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
627                 rxq_info->databuffer_size =
628                         (rte_pktmbuf_data_room_size(rxq->mp) -
629                                 RTE_PKTMBUF_HEADROOM);
630         }
631 }
632
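/*
 * Editor's note: for a mempool created with the common default of
 * RTE_MBUF_DEFAULT_BUF_SIZE (2048 bytes of data room plus headroom) and
 * the default 128-byte RTE_PKTMBUF_HEADROOM, the computation above gives
 * databuffer_size = (2048 + 128) - 128 = 2048 usable bytes per RX buffer.
 */
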
633 /* Configure VSI queues to work with a Linux PF host */
634 static int
635 i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
636 {
637         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
638         struct i40e_rx_queue **rxq =
639                 (struct i40e_rx_queue **)dev->data->rx_queues;
640         struct i40e_tx_queue **txq =
641                 (struct i40e_tx_queue **)dev->data->tx_queues;
642         struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
643         struct i40e_virtchnl_queue_pair_info *vc_qpi;
644         struct vf_cmd_info args;
645         uint16_t i, nb_qp = vf->num_queue_pairs;
646         const uint32_t size =
647                 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
648         uint8_t buff[size];
649         int ret;
650
651         memset(buff, 0, sizeof(buff));
652         vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
653         vc_vqci->vsi_id = vf->vsi_res->vsi_id;
654         vc_vqci->num_queue_pairs = nb_qp;
655
656         for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
657                 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
658                         vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
659                 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
660                         vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
661                                         vf->max_pkt_len, rxq[i]);
662         }
663         memset(&args, 0, sizeof(args));
664         args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
665         args.in_args = (uint8_t *)vc_vqci;
666         args.in_args_size = size;
667         args.out_buffer = cmd_result_buffer;
668         args.out_size = I40E_AQ_BUF_SZ;
669         ret = i40evf_execute_vf_cmd(dev, &args);
670         if (ret)
671                 PMD_DRV_LOG(ERR, "Failed to execute command of "
672                         "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
673
674         return ret;
675 }
676
677 /* Configure VSI queues to work with a DPDK PF host */
678 static int
679 i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
680 {
681         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
682         struct i40e_rx_queue **rxq =
683                 (struct i40e_rx_queue **)dev->data->rx_queues;
684         struct i40e_tx_queue **txq =
685                 (struct i40e_tx_queue **)dev->data->tx_queues;
686         struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
687         struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
688         struct vf_cmd_info args;
689         uint16_t i, nb_qp = vf->num_queue_pairs;
690         const uint32_t size =
691                 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
692         uint8_t buff[size];
693         int ret;
694
695         memset(buff, 0, sizeof(buff));
696         vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
697         vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
698         vc_vqcei->num_queue_pairs = nb_qp;
699         vc_qpei = vc_vqcei->qpair;
700         for (i = 0; i < nb_qp; i++, vc_qpei++) {
701                 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
702                         vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
703                 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
704                         vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
705                                         vf->max_pkt_len, rxq[i]);
706                 if (i < dev->data->nb_rx_queues)
707                         /*
708                          * It adds extra info for configuring VSI queues, which
709                          * is needed to enable the configurable crc stripping
710                          * in VF.
711                          */
712                         vc_qpei->rxq_ext.crcstrip =
713                                 dev->data->dev_conf.rxmode.hw_strip_crc;
714         }
715         memset(&args, 0, sizeof(args));
716         args.ops =
717                 (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
718         args.in_args = (uint8_t *)vc_vqcei;
719         args.in_args_size = size;
720         args.out_buffer = cmd_result_buffer;
721         args.out_size = I40E_AQ_BUF_SZ;
722         ret = i40evf_execute_vf_cmd(dev, &args);
723         if (ret)
724                 PMD_DRV_LOG(ERR, "Failed to execute command of "
725                         "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
726
727         return ret;
728 }
729
730 static int
731 i40evf_configure_queues(struct rte_eth_dev *dev)
732 {
733         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
734
735         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
736                 /* To support DPDK PF host */
737                 return i40evf_configure_vsi_queues_ext(dev);
738         else
739                 /* To support Linux PF host */
740                 return i40evf_configure_vsi_queues(dev);
741 }
742
743 static int
744 i40evf_config_irq_map(struct rte_eth_dev *dev)
745 {
746         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
747         struct vf_cmd_info args;
748         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
749                 sizeof(struct i40e_virtchnl_vector_map)];
750         struct i40e_virtchnl_irq_map_info *map_info;
751         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
752         uint32_t vector_id;
753         int i, err;
754
755         if (rte_intr_allow_others(intr_handle)) {
756                 if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
757                         vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
758                 else
759                         vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
760         } else {
761                 vector_id = I40E_MISC_VEC_ID;
762         }
763
764         map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
765         map_info->num_vectors = 1;
766         map_info->vecmap[0].rxitr_idx = I40E_QINT_RQCTL_MSIX_INDX_NOITR;
767         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
768         /* Always use the default dynamic MSI-X interrupt */
769         map_info->vecmap[0].vector_id = vector_id;
770         /* Don't map any tx queue */
771         map_info->vecmap[0].txq_map = 0;
772         map_info->vecmap[0].rxq_map = 0;
773         for (i = 0; i < dev->data->nb_rx_queues; i++) {
774                 map_info->vecmap[0].rxq_map |= 1 << i;
775                 if (rte_intr_dp_is_en(intr_handle))
776                         intr_handle->intr_vec[i] = vector_id;
777         }
778
779         args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
780         args.in_args = (u8 *)cmd_buffer;
781         args.in_args_size = sizeof(cmd_buffer);
782         args.out_buffer = cmd_result_buffer;
783         args.out_size = I40E_AQ_BUF_SZ;
784         err = i40evf_execute_vf_cmd(dev, &args);
785         if (err)
786                 PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
787
788         return err;
789 }
790
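/*
 * Editor's note: rxq_map is a queue bitmap, so with e.g. nb_rx_queues = 4
 * the loop above sets bits 0-3 and rxq_map ends up as 0xF, binding all
 * four RX queues to the single vector_id chosen earlier.
 */
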
791 static int
792 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
793                                 bool on)
794 {
795         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
796         struct i40e_virtchnl_queue_select queue_select;
797         int err;
798         struct vf_cmd_info args;
799         memset(&queue_select, 0, sizeof(queue_select));
800         queue_select.vsi_id = vf->vsi_res->vsi_id;
801
802         if (isrx)
803                 queue_select.rx_queues |= 1 << qid;
804         else
805                 queue_select.tx_queues |= 1 << qid;
806
807         if (on)
808                 args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
809         else
810                 args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
811         args.in_args = (u8 *)&queue_select;
812         args.in_args_size = sizeof(queue_select);
813         args.out_buffer = cmd_result_buffer;
814         args.out_size = I40E_AQ_BUF_SZ;
815         err = i40evf_execute_vf_cmd(dev, &args);
816         if (err)
817                 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
818                             isrx ? "RX" : "TX", qid, on ? "on" : "off");
819
820         return err;
821 }
822
823 static int
824 i40evf_start_queues(struct rte_eth_dev *dev)
825 {
826         struct rte_eth_dev_data *dev_data = dev->data;
827         int i;
828         struct i40e_rx_queue *rxq;
829         struct i40e_tx_queue *txq;
830
831         for (i = 0; i < dev->data->nb_rx_queues; i++) {
832                 rxq = dev_data->rx_queues[i];
833                 if (rxq->rx_deferred_start)
834                         continue;
835                 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
836                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
837                         return -1;
838                 }
839         }
840
841         for (i = 0; i < dev->data->nb_tx_queues; i++) {
842                 txq = dev_data->tx_queues[i];
843                 if (txq->tx_deferred_start)
844                         continue;
845                 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
846                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
847                         return -1;
848                 }
849         }
850
851         return 0;
852 }
853
854 static int
855 i40evf_stop_queues(struct rte_eth_dev *dev)
856 {
857         int i;
858
859         /* Stop TX queues first */
860         for (i = 0; i < dev->data->nb_tx_queues; i++) {
861                 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
862                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
863                         return -1;
864                 }
865         }
866
867         /* Then stop RX queues */
868         for (i = 0; i < dev->data->nb_rx_queues; i++) {
869                 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
870                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
871                         return -1;
872                 }
873         }
874
875         return 0;
876 }
877
878 static int
879 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
880 {
881         struct i40e_virtchnl_ether_addr_list *list;
882         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
883         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
884                         sizeof(struct i40e_virtchnl_ether_addr)];
885         int err;
886         struct vf_cmd_info args;
887
888         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
889                 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
890                             addr->addr_bytes[0], addr->addr_bytes[1],
891                             addr->addr_bytes[2], addr->addr_bytes[3],
892                             addr->addr_bytes[4], addr->addr_bytes[5]);
893                 return -1;
894         }
895
896         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
897         list->vsi_id = vf->vsi_res->vsi_id;
898         list->num_elements = 1;
899         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
900                                         sizeof(addr->addr_bytes));
901
902         args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
903         args.in_args = cmd_buffer;
904         args.in_args_size = sizeof(cmd_buffer);
905         args.out_buffer = cmd_result_buffer;
906         args.out_size = I40E_AQ_BUF_SZ;
907         err = i40evf_execute_vf_cmd(dev, &args);
908         if (err)
909                 PMD_DRV_LOG(ERR, "fail to execute command "
910                             "OP_ADD_ETHER_ADDRESS");
911
912         return err;
913 }
914
915 static int
916 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
917 {
918         struct i40e_virtchnl_ether_addr_list *list;
919         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
920         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
921                         sizeof(struct i40e_virtchnl_ether_addr)];
922         int err;
923         struct vf_cmd_info args;
924
925         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
926                 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
927                             addr->addr_bytes[0], addr->addr_bytes[1],
928                             addr->addr_bytes[2], addr->addr_bytes[3],
929                             addr->addr_bytes[4], addr->addr_bytes[5]);
930                 return -1;
931         }
932
933         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
934         list->vsi_id = vf->vsi_res->vsi_id;
935         list->num_elements = 1;
936         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
937                         sizeof(addr->addr_bytes));
938
939         args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
940         args.in_args = cmd_buffer;
941         args.in_args_size = sizeof(cmd_buffer);
942         args.out_buffer = cmd_result_buffer;
943         args.out_size = I40E_AQ_BUF_SZ;
944         err = i40evf_execute_vf_cmd(dev, &args);
945         if (err)
946                 PMD_DRV_LOG(ERR, "fail to execute command "
947                             "OP_DEL_ETHER_ADDRESS");
948
949         return err;
950 }
951
952 static int
953 i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
954 {
955         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
956         struct i40e_virtchnl_queue_select q_stats;
957         int err;
958         struct vf_cmd_info args;
959
960         memset(&q_stats, 0, sizeof(q_stats));
961         q_stats.vsi_id = vf->vsi_res->vsi_id;
962         args.ops = I40E_VIRTCHNL_OP_GET_STATS;
963         args.in_args = (u8 *)&q_stats;
964         args.in_args_size = sizeof(q_stats);
965         args.out_buffer = cmd_result_buffer;
966         args.out_size = I40E_AQ_BUF_SZ;
967
968         err = i40evf_execute_vf_cmd(dev, &args);
969         if (err) {
970                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
971                 *pstats = NULL;
972                 return err;
973         }
974         *pstats = (struct i40e_eth_stats *)args.out_buffer;
975         return 0;
976 }
977
978 static int
979 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
980 {
981         int ret;
982         struct i40e_eth_stats *pstats = NULL;
983
984         ret = i40evf_update_stats(dev, &pstats);
985         if (ret != 0)
986                 return ret;
987
988         stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
989                                                 pstats->rx_broadcast;
990         stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
991                                                 pstats->tx_unicast;
992         stats->ierrors = pstats->rx_discards;
993         stats->oerrors = pstats->tx_errors + pstats->tx_discards;
994         stats->ibytes = pstats->rx_bytes;
995         stats->obytes = pstats->tx_bytes;
996
997         return 0;
998 }
999
1000 static void
1001 i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
1002 {
1003         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1004         struct i40e_eth_stats *pstats = NULL;
1005
1006         /* read stat values to clear hardware registers */
1007         i40evf_update_stats(dev, &pstats);
1008
1009         /* set stats offset based on current values */
1010         vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
1011 }
1012
1013 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
1014                                  struct rte_eth_xstats *xstats, unsigned n)
1015 {
1016         int ret;
1017         unsigned i;
1018         struct i40e_eth_stats *pstats = NULL;
1019
1020         if (n < I40EVF_NB_XSTATS)
1021                 return I40EVF_NB_XSTATS;
1022
1023         ret = i40evf_update_stats(dev, &pstats);
1024         if (ret != 0)
1025                 return 0;
1026
1027         if (!xstats)
1028                 return 0;
1029
1030         /* loop over xstats array and values from pstats */
1031         for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1032                 snprintf(xstats[i].name, sizeof(xstats[i].name),
1033                          "%s", rte_i40evf_stats_strings[i].name);
1034                 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1035                         rte_i40evf_stats_strings[i].offset);
1036         }
1037
1038         return I40EVF_NB_XSTATS;
1039 }
1040
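/*
 * Editor's sketch of application-side usage (assumes a started port and
 * this era's xstats API): query the required count first, then fetch the
 * entries. The helper name is hypothetical.
 */
static void __rte_unused
example_dump_xstats(uint8_t port_id)
{
        int i, n = rte_eth_xstats_get(port_id, NULL, 0);
        struct rte_eth_xstats *xs;

        if (n <= 0)
                return;
        xs = rte_zmalloc("example_xstats", sizeof(*xs) * n, 0);
        if (xs == NULL)
                return;
        n = rte_eth_xstats_get(port_id, xs, n);
        for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", xs[i].name, xs[i].value);
        rte_free(xs);
}
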
1041 static int
1042 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1043 {
1044         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1045         struct i40e_virtchnl_vlan_filter_list *vlan_list;
1046         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1047                                                         sizeof(uint16_t)];
1048         int err;
1049         struct vf_cmd_info args;
1050
1051         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1052         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1053         vlan_list->num_elements = 1;
1054         vlan_list->vlan_id[0] = vlanid;
1055
1056         args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
1057         args.in_args = (u8 *)&cmd_buffer;
1058         args.in_args_size = sizeof(cmd_buffer);
1059         args.out_buffer = cmd_result_buffer;
1060         args.out_size = I40E_AQ_BUF_SZ;
1061         err = i40evf_execute_vf_cmd(dev, &args);
1062         if (err)
1063                 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
1064
1065         return err;
1066 }
1067
1068 static int
1069 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1070 {
1071         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1072         struct i40e_virtchnl_vlan_filter_list *vlan_list;
1073         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1074                                                         sizeof(uint16_t)];
1075         int err;
1076         struct vf_cmd_info args;
1077
1078         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1079         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1080         vlan_list->num_elements = 1;
1081         vlan_list->vlan_id[0] = vlanid;
1082
1083         args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
1084         args.in_args = (u8 *)&cmd_buffer;
1085         args.in_args_size = sizeof(cmd_buffer);
1086         args.out_buffer = cmd_result_buffer;
1087         args.out_size = I40E_AQ_BUF_SZ;
1088         err = i40evf_execute_vf_cmd(dev, &args);
1089         if (err)
1090                 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
1091
1092         return err;
1093 }
1094
1095 static int
1096 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
1097 {
1098         int err;
1099         struct vf_cmd_info args;
1100         struct rte_eth_link *new_link;
1101
1102         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
1103         args.in_args = NULL;
1104         args.in_args_size = 0;
1105         args.out_buffer = cmd_result_buffer;
1106         args.out_size = I40E_AQ_BUF_SZ;
1107         err = i40evf_execute_vf_cmd(dev, &args);
1108         if (err) {
1109                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
1110                 return err;
1111         }
1112
1113         new_link = (struct rte_eth_link *)args.out_buffer;
1114         (void)rte_memcpy(link, new_link, sizeof(*link));
1115
1116         return 0;
1117 }
1118
1119 static const struct rte_pci_id pci_id_i40evf_map[] = {
1120 #define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
1121 #include "rte_pci_dev_ids.h"
1122 { .vendor_id = 0, /* sentinel */ },
1123 };
1124
1125 static inline int
1126 i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
1127                                     struct rte_eth_link *link)
1128 {
1129         struct rte_eth_link *dst = &(dev->data->dev_link);
1130         struct rte_eth_link *src = link;
1131
1132         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1133                                         *(uint64_t *)src) == 0)
1134                 return -1;
1135
1136         return 0;
1137 }
1138
1139 static int
1140 i40evf_reset_vf(struct i40e_hw *hw)
1141 {
1142         int i, reset;
1143
1144         if (i40e_vf_reset(hw) != I40E_SUCCESS) {
1145                 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1146                 return -1;
1147         }
1148         /**
1149          * After the VF issues a reset command to the PF, the PF won't
1150          * necessarily reset the VF; it depends on the PF's current state.
1151          * If the PF is not initialized yet, there is no VF reset to do;
1152          * otherwise it will try to reset. Even when the VF is reset, the
1153          * PF sets I40E_VFGEN_RSTAT to COMPLETE first, waits 10 ms, then
1154          * sets it to ACTIVE. The VF may miss the moment COMPLETE is set,
1155          * so the VF waits a relatively long time here.
1156          */
1157         rte_delay_ms(200);
1158
1159         for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
1160                 reset = rd32(hw, I40E_VFGEN_RSTAT) &
1161                         I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1162                 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
1163                 if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
1164                         break;
1165                 else
1166                         rte_delay_ms(50);
1167         }
1168
1169         if (i >= MAX_RESET_WAIT_CNT) {
1170                 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1171                 return -1;
1172         }
1173
1174         return 0;
1175 }
1176
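/*
 * Editor's note: the reset wait above is bounded by the initial 200 ms
 * delay plus MAX_RESET_WAIT_CNT * 50 ms = 20 * 50 ms = 1 s of polling,
 * i.e. about 1.2 s worst case before the reset is declared failed.
 */
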
1177 static int
1178 i40evf_init_vf(struct rte_eth_dev *dev)
1179 {
1180         int i, err, bufsz;
1181         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1182         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1183
1184         vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1185         vf->dev_data = dev->data;
1186         err = i40evf_set_mac_type(hw);
1187         if (err) {
1188                 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1189                 goto err;
1190         }
1191
1192         i40e_init_adminq_parameter(hw);
1193         err = i40e_init_adminq(hw);
1194         if (err) {
1195                 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1196                 goto err;
1197         }
1198
1199
1200         /* Reset VF and wait until it's complete */
1201         if (i40evf_reset_vf(hw)) {
1202                 PMD_INIT_LOG(ERR, "reset NIC failed");
1203                 goto err_aq;
1204         }
1205
1206         /* VF is reset; shut down the admin queue and initialize it again */
1207         if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1208                 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1209                 return -1;
1210         }
1211
1212         i40e_init_adminq_parameter(hw);
1213         if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1214                 PMD_INIT_LOG(ERR, "init_adminq failed");
1215                 return -1;
1216         }
1217         if (i40evf_check_api_version(dev) != 0) {
1218                 PMD_INIT_LOG(ERR, "check_api version failed");
1219                 goto err_aq;
1220         }
1221         bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1222                 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1223         vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1224         if (!vf->vf_res) {
1225                 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1226                 goto err_aq;
1227         }
1228
1229         if (i40evf_get_vf_resource(dev) != 0) {
1230                 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1231                 goto err_alloc;
1232         }
1233
1234         /* got VF config message back from PF, now we can parse it */
1235         for (i = 0; i < vf->vf_res->num_vsis; i++) {
1236                 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
1237                         vf->vsi_res = &vf->vf_res->vsi_res[i];
1238         }
1239
1240         if (!vf->vsi_res) {
1241                 PMD_INIT_LOG(ERR, "no LAN VSI found");
1242                 goto err_alloc;
1243         }
1244
1245         if (hw->mac.type == I40E_MAC_X722_VF)
1246                 vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
1247         vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1248         vf->vsi.type = vf->vsi_res->vsi_type;
1249         vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1250         vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1251
1252         /* check the MAC address; if it's not valid, generate a random one */
1253         if (I40E_SUCCESS != i40e_validate_mac_addr(\
1254                         vf->vsi_res->default_mac_addr))
1255                 eth_random_addr(vf->vsi_res->default_mac_addr);
1256
1257         ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
1258                                         (struct ether_addr *)hw->mac.addr);
1259
1260         return 0;
1261
1262 err_alloc:
1263         rte_free(vf->vf_res);
1264 err_aq:
1265         i40e_shutdown_adminq(hw); /* ignore error */
1266 err:
1267         return -1;
1268 }
1269
1270 static int
1271 i40evf_uninit_vf(struct rte_eth_dev *dev)
1272 {
1273         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1274         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1275
1276         PMD_INIT_FUNC_TRACE();
1277
1278         if (hw->adapter_stopped == 0)
1279                 i40evf_dev_close(dev);
1280         rte_free(vf->vf_res);
1281         vf->vf_res = NULL;
1282
1283         return 0;
1284 }
1285
1286 static int
1287 i40evf_dev_init(struct rte_eth_dev *eth_dev)
1288 {
1289         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
1290                         eth_dev->data->dev_private);
1291
1292         PMD_INIT_FUNC_TRACE();
1293
1294         /* assign ops func pointer */
1295         eth_dev->dev_ops = &i40evf_eth_dev_ops;
1296         eth_dev->rx_pkt_burst = &i40e_recv_pkts;
1297         eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
1298
1299         /*
1300          * For secondary processes, we don't initialise any further as primary
1301          * has already done this work.
1302          */
1303         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1304                 i40e_set_rx_function(eth_dev);
1305                 i40e_set_tx_function(eth_dev);
1306                 return 0;
1307         }
1308
1309         rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
1310
1311         hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
1312         hw->device_id = eth_dev->pci_dev->id.device_id;
1313         hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
1314         hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
1315         hw->bus.device = eth_dev->pci_dev->addr.devid;
1316         hw->bus.func = eth_dev->pci_dev->addr.function;
1317         hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
1318         hw->adapter_stopped = 0;
1319
1320         if (i40evf_init_vf(eth_dev) != 0) {
1321                 PMD_INIT_LOG(ERR, "Init vf failed");
1322                 return -1;
1323         }
1324
1325         /* copy mac addr */
1326         eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
1327                                         ETHER_ADDR_LEN, 0);
1328         if (eth_dev->data->mac_addrs == NULL) {
1329                 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
1330                                 "store MAC addresses", ETHER_ADDR_LEN);
1331                 return -ENOMEM;
1332         }
1333         ether_addr_copy((struct ether_addr *)hw->mac.addr,
1334                 (struct ether_addr *)eth_dev->data->mac_addrs);
1335
1336         return 0;
1337 }
1338
1339 static int
1340 i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
1341 {
1342         PMD_INIT_FUNC_TRACE();
1343
1344         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1345                 return -EPERM;
1346
1347         eth_dev->dev_ops = NULL;
1348         eth_dev->rx_pkt_burst = NULL;
1349         eth_dev->tx_pkt_burst = NULL;
1350
1351         if (i40evf_uninit_vf(eth_dev) != 0) {
1352                 PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
1353                 return -1;
1354         }
1355
1356         rte_free(eth_dev->data->mac_addrs);
1357         eth_dev->data->mac_addrs = NULL;
1358
1359         return 0;
1360 }
1361 /*
1362  * virtual function driver struct
1363  */
1364 static struct eth_driver rte_i40evf_pmd = {
1365         .pci_drv = {
1366                 .name = "rte_i40evf_pmd",
1367                 .id_table = pci_id_i40evf_map,
1368                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
1369         },
1370         .eth_dev_init = i40evf_dev_init,
1371         .eth_dev_uninit = i40evf_dev_uninit,
1372         .dev_private_size = sizeof(struct i40e_adapter),
1373 };
1374
1375 /*
1376  * VF Driver initialization routine.
1377  * Invoked once at EAL init time.
1378  * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
1379  */
1380 static int
1381 rte_i40evf_pmd_init(const char *name __rte_unused,
1382                     const char *params __rte_unused)
1383 {
1384         PMD_INIT_FUNC_TRACE();
1385
1386         rte_eth_driver_register(&rte_i40evf_pmd);
1387
1388         return 0;
1389 }
1390
1391 static struct rte_driver rte_i40evf_driver = {
1392         .type = PMD_PDEV,
1393         .init = rte_i40evf_pmd_init,
1394 };
1395
1396 PMD_REGISTER_DRIVER(rte_i40evf_driver);
1397
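/*
 * Editor's sketch (illustrative, assuming this era's EAL/ethdev API): once
 * the driver is registered above, matching VF devices are probed during
 * rte_eal_init(), and each one shows up as an ethdev port. The entry point
 * name is hypothetical.
 */
static int __rte_unused
example_app_init(int argc, char **argv)
{
        if (rte_eal_init(argc, argv) < 0)
                return -1;
        /* Each probed i40e VF is counted as an ethdev port */
        printf("%u port(s) available\n", (unsigned)rte_eth_dev_count());
        return 0;
}
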
1398 static int
1399 i40evf_dev_configure(struct rte_eth_dev *dev)
1400 {
1401         struct i40e_adapter *ad =
1402                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1403
1404         /* Initialize to TRUE. If any Rx queue fails to meet the bulk
1405          * allocation or vector Rx preconditions, we will reset it.
1406          */
1407         ad->rx_bulk_alloc_allowed = true;
1408         ad->rx_vec_allowed = true;
1409         ad->tx_simple_allowed = true;
1410         ad->tx_vec_allowed = true;
1411
1412         return i40evf_init_vlan(dev);
1413 }
1414
1415 static int
1416 i40evf_init_vlan(struct rte_eth_dev *dev)
1417 {
1418         struct rte_eth_dev_data *data = dev->data;
1419         int ret;
1420
1421         /* Apply vlan offload setting */
1422         i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1423
1424         /* Apply pvid setting */
1425         ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1426                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
1427         return ret;
1428 }
1429
1430 static void
1431 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1432 {
1433         bool enable_vlan_strip = 0;
1434         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1435         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1436
1437         /* Linux pf host doesn't support vlan offload yet */
1438         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1439                 /* Vlan stripping setting */
1440                 if (mask & ETH_VLAN_STRIP_MASK) {
1441                         /* Enable or disable VLAN stripping */
1442                         if (dev_conf->rxmode.hw_vlan_strip)
1443                                 enable_vlan_strip = 1;
1444                         else
1445                                 enable_vlan_strip = 0;
1446
1447                         i40evf_config_vlan_offload(dev, enable_vlan_strip);
1448                 }
1449         }
1450 }
1451
1452 static int
1453 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1454 {
1455         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1456         struct i40e_vsi_vlan_pvid_info info;
1457         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1458
1459         memset(&info, 0, sizeof(info));
1460         info.on = on;
1461
1462         /* Linux pf host doesn't support vlan offload yet */
1463         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1464                 if (info.on)
1465                         info.config.pvid = pvid;
1466                 else {
1467                         info.config.reject.tagged =
1468                                 dev_conf->txmode.hw_vlan_reject_tagged;
1469                         info.config.reject.untagged =
1470                                 dev_conf->txmode.hw_vlan_reject_untagged;
1471                 }
1472                 return i40evf_config_vlan_pvid(dev, &info);
1473         }
1474
1475         return 0;
1476 }
1477
1478 static int
1479 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1480 {
1481         struct i40e_rx_queue *rxq;
1482         int err = 0;
1483         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1484
1485         PMD_INIT_FUNC_TRACE();
1486
1487         if (rx_queue_id < dev->data->nb_rx_queues) {
1488                 rxq = dev->data->rx_queues[rx_queue_id];
1489
1490                 err = i40e_alloc_rx_queue_mbufs(rxq);
1491                 if (err) {
1492                         PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1493                         return err;
1494                 }
1495
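                     /* Make sure the descriptor and mbuf address writes above
                      * are globally visible before the tail update below
                      * hands the ring to the hardware.
                      */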
1496                 rte_wmb();
1497
1498                 /* Init the RX tail register. */
1499                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1500                 I40EVF_WRITE_FLUSH(hw);
1501
1502                 /* Ready to switch the queue on */
1503                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1504
1505                 if (err)
1506                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1507                                     rx_queue_id);
1508                 else
1509                         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1510         }
1511
1512         return err;
1513 }
1514
1515 static int
1516 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1517 {
1518         struct i40e_rx_queue *rxq;
1519         int err;
1520
1521         if (rx_queue_id < dev->data->nb_rx_queues) {
1522                 rxq = dev->data->rx_queues[rx_queue_id];
1523
1524                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1525
1526                 if (err) {
1527                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1528                                     rx_queue_id);
1529                         return err;
1530                 }
1531
1532                 i40e_rx_queue_release_mbufs(rxq);
1533                 i40e_reset_rx_queue(rxq);
1534                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1535         }
1536
1537         return 0;
1538 }
1539
1540 static int
1541 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1542 {
1543         int err = 0;
1544
1545         PMD_INIT_FUNC_TRACE();
1546
1547         if (tx_queue_id < dev->data->nb_tx_queues) {
1548
1549                 /* Ready to switch the queue on */
1550                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1551
1552                 if (err)
1553                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1554                                     tx_queue_id);
1555                 else
1556                         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1557         }
1558
1559         return err;
1560 }
1561
1562 static int
1563 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1564 {
1565         struct i40e_tx_queue *txq;
1566         int err;
1567
1568         if (tx_queue_id < dev->data->nb_tx_queues) {
1569                 txq = dev->data->tx_queues[tx_queue_id];
1570
1571                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1572
1573                 if (err) {
1574                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1575                                     tx_queue_id);
1576                         return err;
1577                 }
1578
1579                 i40e_tx_queue_release_mbufs(txq);
1580                 i40e_reset_tx_queue(txq);
1581                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1582         }
1583
1584         return 0;
1585 }
1586
1587 static int
1588 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1589 {
1590         int ret;
1591
1592         if (on)
1593                 ret = i40evf_add_vlan(dev, vlan_id);
1594         else
1595                 ret = i40evf_del_vlan(dev, vlan_id);
1596
1597         return ret;
1598 }
1599
1600 static int
1601 i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
1602 {
1603         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1604         struct rte_eth_dev_data *dev_data = dev->data;
1605         struct rte_pktmbuf_pool_private *mbp_priv;
1606         uint16_t buf_size, len;
1607
1608         rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
1609         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1610         I40EVF_WRITE_FLUSH(hw);
1611
1612         /* Calculate the maximum packet length allowed */
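             /* buf_size is the data room of one mbuf minus the headroom. The
              * hardware buffer length must be aligned to the descriptor
              * granularity, and the overall frame cap is bounded by how many
              * buffers can be chained for a single packet.
              */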
1613         mbp_priv = rte_mempool_get_priv(rxq->mp);
1614         buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
1615                                         RTE_PKTMBUF_HEADROOM);
1616         rxq->hs_mode = i40e_header_split_none;
1617         rxq->rx_hdr_len = 0;
1618         rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
1619         len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
1620         rxq->max_pkt_len = RTE_MIN(len,
1621                 dev_data->dev_conf.rxmode.max_rx_pkt_len);
1622
1623         /**
1624          * Check if the jumbo frame and maximum packet length are set correctly
1625          */
1626         if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
1627                 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
1628                     rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
1629                         PMD_DRV_LOG(ERR, "maximum packet length must be "
1630                                 "larger than %u and smaller than %u, as jumbo "
1631                                 "frame is enabled", (uint32_t)ETHER_MAX_LEN,
1632                                         (uint32_t)I40E_FRAME_SIZE_MAX);
1633                         return I40E_ERR_CONFIG;
1634                 }
1635         } else {
1636                 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
1637                     rxq->max_pkt_len > ETHER_MAX_LEN) {
1638                         PMD_DRV_LOG(ERR, "maximum packet length must be "
1639                                 "larger than %u and smaller than %u, as jumbo "
1640                                 "frame is disabled", (uint32_t)ETHER_MIN_LEN,
1641                                                 (uint32_t)ETHER_MAX_LEN);
1642                         return I40E_ERR_CONFIG;
1643                 }
1644         }
1645
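             /* Use scattered Rx when it is requested explicitly, or when a
              * maximum-sized frame plus two VLAN tags (QinQ) cannot fit in a
              * single buffer.
              */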
1646         if (dev_data->dev_conf.rxmode.enable_scatter ||
1647             (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
1648                 dev_data->scattered_rx = 1;
1649         }
1650
1651         return 0;
1652 }
1653
1654 static int
1655 i40evf_rx_init(struct rte_eth_dev *dev)
1656 {
1657         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1658         uint16_t i;
1659         int ret = I40E_SUCCESS;
1660         struct i40e_rx_queue **rxq =
1661                 (struct i40e_rx_queue **)dev->data->rx_queues;
1662
1663         i40evf_config_rss(vf);
1664         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1665                 if (!rxq[i] || !rxq[i]->q_set)
1666                         continue;
1667                 ret = i40evf_rxq_init(dev, rxq[i]);
1668                 if (ret != I40E_SUCCESS)
1669                         break;
1670         }
1671         if (ret == I40E_SUCCESS)
1672                 i40e_set_rx_function(dev);
1673
1674         return ret;
1675 }
1676
1677 static void
1678 i40evf_tx_init(struct rte_eth_dev *dev)
1679 {
1680         uint16_t i;
1681         struct i40e_tx_queue **txq =
1682                 (struct i40e_tx_queue **)dev->data->tx_queues;
1683         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1684
1685         for (i = 0; i < dev->data->nb_tx_queues; i++)
1686                 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1687
1688         i40e_set_tx_function(dev);
1689 }
1690
1691 static inline void
1692 i40evf_enable_queues_intr(struct rte_eth_dev *dev)
1693 {
1694         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1695         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1696         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1697
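             /* Without extra MSI-X vectors every queue shares the misc
              * vector, which is controlled through VFINT_DYN_CTL01.
              */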
1698         if (!rte_intr_allow_others(intr_handle)) {
1699                 I40E_WRITE_REG(hw,
1700                                I40E_VFINT_DYN_CTL01,
1701                                I40E_VFINT_DYN_CTL01_INTENA_MASK |
1702                                I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
1703                 I40EVF_WRITE_FLUSH(hw);
1704                 return;
1705         }
1706
1707         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1708                 /* To support DPDK PF host */
1709                 I40E_WRITE_REG(hw,
1710                         I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1711                         I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1712                         I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1713         else
1714                 /* To support Linux PF host */
1715                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1716                                 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1717                                 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
1718
1719         I40EVF_WRITE_FLUSH(hw);
1720 }
1721
1722 static inline void
1723 i40evf_disable_queues_intr(struct rte_eth_dev *dev)
1724 {
1725         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1726         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1727         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1728
1729         if (!rte_intr_allow_others(intr_handle)) {
1730                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
1731                 I40EVF_WRITE_FLUSH(hw);
1732                 return;
1733         }
1734
1735         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1736                 I40E_WRITE_REG(hw,
1737                                I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
1738                                                     - 1),
1739                                0);
1740         else
1741                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
1742
1743         I40EVF_WRITE_FLUSH(hw);
1744 }
1745
1746 static int
1747 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1748 {
1749         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1750         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1751         uint16_t interval =
1752                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1753         uint16_t msix_intr;
1754
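             /* The misc vector is programmed through VFINT_DYN_CTL01, while
              * dedicated Rx vectors use the per-vector VFINT_DYN_CTLN1
              * registers, indexed relative to I40E_RX_VEC_START. The ITR
              * interval sets how often the vector is allowed to fire.
              */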
1755         msix_intr = intr_handle->intr_vec[queue_id];
1756         if (msix_intr == I40E_MISC_VEC_ID)
1757                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1758                                I40E_VFINT_DYN_CTL01_INTENA_MASK |
1759                                I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1760                                (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
1761                                (interval <<
1762                                 I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
1763         else
1764                 I40E_WRITE_REG(hw,
1765                                I40E_VFINT_DYN_CTLN1(msix_intr -
1766                                                     I40E_RX_VEC_START),
1767                                I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1768                                I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1769                                (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1770                                (interval <<
1771                                 I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
1772
1773         I40EVF_WRITE_FLUSH(hw);
1774
1775         rte_intr_enable(&dev->pci_dev->intr_handle);
1776
1777         return 0;
1778 }
1779
1780 static int
1781 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1782 {
1783         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1784         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1785         uint16_t msix_intr;
1786
1787         msix_intr = intr_handle->intr_vec[queue_id];
1788         if (msix_intr == I40E_MISC_VEC_ID)
1789                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
1790         else
1791                 I40E_WRITE_REG(hw,
1792                                I40E_VFINT_DYN_CTLN1(msix_intr -
1793                                                     I40E_RX_VEC_START),
1794                                0);
1795
1796         I40EVF_WRITE_FLUSH(hw);
1797
1798         return 0;
1799 }
1800
1801 static int
1802 i40evf_dev_start(struct rte_eth_dev *dev)
1803 {
1804         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1805         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1806         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1807         struct ether_addr mac_addr;
1808         uint32_t intr_vector = 0;
1809
1810         PMD_INIT_FUNC_TRACE();
1811
1812         hw->adapter_stopped = 0;
1813
1814         vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1815         vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
1816                                         dev->data->nb_tx_queues);
1817
1818         /* check and configure queue intr-vector mapping */
1819         if (dev->data->dev_conf.intr_conf.rxq != 0) {
1820                 intr_vector = dev->data->nb_rx_queues;
1821                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1822                         return -1;
1823         }
1824
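             /* With per-queue Rx interrupts in use, intr_vec records the
              * MSI-X vector assigned to each Rx queue.
              */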
1825         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1826                 intr_handle->intr_vec =
1827                         rte_zmalloc("intr_vec",
1828                                     dev->data->nb_rx_queues * sizeof(int), 0);
1829                 if (!intr_handle->intr_vec) {
1830                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1831                                      " intr_vec", dev->data->nb_rx_queues);
1832                         return -ENOMEM;
1833                 }
1834         }
1835
1836         if (i40evf_rx_init(dev) != 0) {
1837                 PMD_DRV_LOG(ERR, "failed to do RX init");
1838                 return -1;
1839         }
1840
1841         i40evf_tx_init(dev);
1842
1843         if (i40evf_configure_queues(dev) != 0) {
1844                 PMD_DRV_LOG(ERR, "configure queues failed");
1845                 goto err_queue;
1846         }
1847         if (i40evf_config_irq_map(dev)) {
1848                 PMD_DRV_LOG(ERR, "config_irq_map failed");
1849                 goto err_queue;
1850         }
1851
1852         /* Set mac addr */
1853         (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
1854                                 sizeof(mac_addr.addr_bytes));
1855         if (i40evf_add_mac_addr(dev, &mac_addr)) {
1856                 PMD_DRV_LOG(ERR, "Failed to add mac addr");
1857                 goto err_queue;
1858         }
1859
1860         if (i40evf_start_queues(dev) != 0) {
1861                 PMD_DRV_LOG(ERR, "enable queues failed");
1862                 goto err_mac;
1863         }
1864
1865         /* VF doesn't allow interrupts except for rxq interrupts */
1866         if (dev->data->dev_conf.intr_conf.rxq != 0)
1867                 rte_intr_enable(intr_handle);
1868
1869         i40evf_enable_queues_intr(dev);
1870         return 0;
1871
1872 err_mac:
1873         i40evf_del_mac_addr(dev, &mac_addr);
1874 err_queue:
1875         return -1;
1876 }
1877
1878 static void
1879 i40evf_dev_stop(struct rte_eth_dev *dev)
1880 {
1881         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1882         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1883         struct ether_addr mac_addr;
1884
1885         PMD_INIT_FUNC_TRACE();
1886
1887         i40evf_stop_queues(dev);
1888         i40evf_disable_queues_intr(dev);
1889         i40e_dev_clear_queues(dev);
1890
1891         /* Clean datapath event and queue/vec mapping */
1892         rte_intr_efd_disable(intr_handle);
1893         if (intr_handle->intr_vec) {
1894                 rte_free(intr_handle->intr_vec);
1895                 intr_handle->intr_vec = NULL;
1896         }
1897         /* Set mac addr */
1898         (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
1899                                 sizeof(mac_addr.addr_bytes));
1900         /* Delete mac addr of this vf */
1901         i40evf_del_mac_addr(dev, &mac_addr);
1902 }
1903
1904 static int
1905 i40evf_dev_link_update(struct rte_eth_dev *dev,
1906                        __rte_unused int wait_to_complete)
1907 {
1908         struct rte_eth_link new_link;
1909         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1910         /*
1911          * The DPDK pf host provides an interface to acquire the link
1912          * status, while the Linux driver does not.
1913          */
1914         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1915                 i40evf_get_link_status(dev, &new_link);
1916         else {
1917                 /* Always assume it's up, for Linux driver PF host */
1918                 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1919                 new_link.link_speed  = ETH_LINK_SPEED_10000;
1920                 new_link.link_status = 1;
1921         }
1922         i40evf_dev_atomic_write_link_status(dev, &new_link);
1923
1924         return 0;
1925 }
1926
1927 static void
1928 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1929 {
1930         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1931         int ret;
1932
1933         /* If enabled, just return */
1934         if (vf->promisc_unicast_enabled)
1935                 return;
1936
1937         ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1938         if (ret == 0)
1939                 vf->promisc_unicast_enabled = TRUE;
1940 }
1941
1942 static void
1943 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1944 {
1945         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1946         int ret;
1947
1948         /* If disabled, just return */
1949         if (!vf->promisc_unicast_enabled)
1950                 return;
1951
1952         ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1953         if (ret == 0)
1954                 vf->promisc_unicast_enabled = FALSE;
1955 }
1956
1957 static void
1958 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1959 {
1960         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1961         int ret;
1962
1963         /* If enabled, just return */
1964         if (vf->promisc_multicast_enabled)
1965                 return;
1966
1967         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1968         if (ret == 0)
1969                 vf->promisc_multicast_enabled = TRUE;
1970 }
1971
1972 static void
1973 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1974 {
1975         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1976         int ret;
1977
1978         /* If enabled, just return */
1979         if (!vf->promisc_multicast_enabled)
1980                 return;
1981
1982         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
1983         if (ret == 0)
1984                 vf->promisc_multicast_enabled = FALSE;
1985 }
1986
1987 static void
1988 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1989 {
1990         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1991
1992         memset(dev_info, 0, sizeof(*dev_info));
1993         dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
1994         dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
1995         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1996         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1997         dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
1998         dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
1999         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2000         dev_info->rx_offload_capa =
2001                 DEV_RX_OFFLOAD_VLAN_STRIP |
2002                 DEV_RX_OFFLOAD_QINQ_STRIP |
2003                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2004                 DEV_RX_OFFLOAD_UDP_CKSUM |
2005                 DEV_RX_OFFLOAD_TCP_CKSUM;
2006         dev_info->tx_offload_capa =
2007                 DEV_TX_OFFLOAD_VLAN_INSERT |
2008                 DEV_TX_OFFLOAD_QINQ_INSERT |
2009                 DEV_TX_OFFLOAD_IPV4_CKSUM |
2010                 DEV_TX_OFFLOAD_UDP_CKSUM |
2011                 DEV_TX_OFFLOAD_TCP_CKSUM |
2012                 DEV_TX_OFFLOAD_SCTP_CKSUM;
2013
2014         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2015                 .rx_thresh = {
2016                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
2017                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
2018                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
2019                 },
2020                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2021                 .rx_drop_en = 0,
2022         };
2023
2024         dev_info->default_txconf = (struct rte_eth_txconf) {
2025                 .tx_thresh = {
2026                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
2027                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
2028                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
2029                 },
2030                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2031                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2032                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2033                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2034         };
2035
2036         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2037                 .nb_max = I40E_MAX_RING_DESC,
2038                 .nb_min = I40E_MIN_RING_DESC,
2039                 .nb_align = I40E_ALIGN_RING_DESC,
2040         };
2041
2042         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2043                 .nb_max = I40E_MAX_RING_DESC,
2044                 .nb_min = I40E_MIN_RING_DESC,
2045                 .nb_align = I40E_ALIGN_RING_DESC,
2046         };
2047 }
2048
2049 static void
2050 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2051 {
2052         if (i40evf_get_statics(dev, stats))
2053                 PMD_DRV_LOG(ERR, "Get statistics failed");
2054 }
2055
2056 static void
2057 i40evf_dev_close(struct rte_eth_dev *dev)
2058 {
2059         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2060
2061         i40evf_dev_stop(dev);
2062         hw->adapter_stopped = 1;
2063         i40e_dev_free_queues(dev);
2064         i40evf_reset_vf(hw);
2065         i40e_shutdown_adminq(hw);
2066 }
2067
2068 static int
2069 i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2070 {
2071         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2072         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2073         int ret;
2074
2075         if (!lut)
2076                 return -EINVAL;
2077
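             /* An RSS-AQ-capable VF reads the lookup table through an admin
              * queue command; otherwise fall back to the VFQF_HLUT registers,
              * which hold four 8-bit LUT entries per 32-bit word.
              */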
2078         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2079                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
2080                                           lut, lut_size);
2081                 if (ret) {
2082                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2083                         return ret;
2084                 }
2085         } else {
2086                 uint32_t *lut_dw = (uint32_t *)lut;
2087                 uint16_t i, lut_size_dw = lut_size / 4;
2088
2089                 for (i = 0; i < lut_size_dw; i++)
2090                         lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
2091         }
2092
2093         return 0;
2094 }
2095
2096 static int
2097 i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2098 {
2099         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2100         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2101         int ret;
2102
2103         if (!vsi || !lut)
2104                 return -EINVAL;
2105
2106         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2107                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
2108                                           lut, lut_size);
2109                 if (ret) {
2110                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2111                         return ret;
2112                 }
2113         } else {
2114                 uint32_t *lut_dw = (uint32_t *)lut;
2115                 uint16_t i, lut_size_dw = lut_size / 4;
2116
2117                 for (i = 0; i < lut_size_dw; i++)
2118                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
2119                 I40EVF_WRITE_FLUSH(hw);
2120         }
2121
2122         return 0;
2123 }
2124
2125 static int
2126 i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
2127                            struct rte_eth_rss_reta_entry64 *reta_conf,
2128                            uint16_t reta_size)
2129 {
2130         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2131         uint8_t *lut;
2132         uint16_t i, idx, shift;
2133         int ret;
2134
2135         if (reta_size != ETH_RSS_RETA_SIZE_64) {
2136                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2137                         "(%d) doesn't match the number the hardware can "
2138                         "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2139                 return -EINVAL;
2140         }
2141
2142         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2143         if (!lut) {
2144                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
2145                 return -ENOMEM;
2146         }
2147         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2148         if (ret)
2149                 goto out;
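             /* reta_conf comes in 64-entry groups; only entries whose bit is
              * set in the group mask overwrite the corresponding LUT slot.
              */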
2150         for (i = 0; i < reta_size; i++) {
2151                 idx = i / RTE_RETA_GROUP_SIZE;
2152                 shift = i % RTE_RETA_GROUP_SIZE;
2153                 if (reta_conf[idx].mask & (1ULL << shift))
2154                         lut[i] = reta_conf[idx].reta[shift];
2155         }
2156         ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
2157
2158 out:
2159         rte_free(lut);
2160
2161         return ret;
2162 }
2163
2164 static int
2165 i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
2166                           struct rte_eth_rss_reta_entry64 *reta_conf,
2167                           uint16_t reta_size)
2168 {
2169         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2170         uint16_t i, idx, shift;
2171         uint8_t *lut;
2172         int ret;
2173
2174         if (reta_size != ETH_RSS_RETA_SIZE_64) {
2175                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2176                         "(%d) doesn't match the number the hardware can "
2177                         "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2178                 return -EINVAL;
2179         }
2180
2181         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2182         if (!lut) {
2183                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2184                 PMD_DRV_LOG(ERR, "Failed to allocate memory for RSS LUT");
2185         }
2186
2187         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2188         if (ret)
2189                 goto out;
2190         for (i = 0; i < reta_size; i++) {
2191                 idx = i / RTE_RETA_GROUP_SIZE;
2192                 shift = i % RTE_RETA_GROUP_SIZE;
2193                 if (reta_conf[idx].mask & (1ULL << shift))
2194                         reta_conf[idx].reta[shift] = lut[i];
2195         }
2196
2197 out:
2198         rte_free(lut);
2199
2200         return ret;
2201 }
2202
2203 static int
2204 i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
2205 {
2206         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2207         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2208         int ret = 0;
2209
2210         if (!key || key_len == 0) {
2211                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2212                 return 0;
2213         } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
2214                 sizeof(uint32_t)) {
2215                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2216                 return -EINVAL;
2217         }
2218
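             /* Push the hash key through the RSS AQ command when supported;
              * otherwise write it word by word into the VFQF_HKEY registers.
              */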
2219         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2220                 struct i40e_aqc_get_set_rss_key_data *key_dw =
2221                         (struct i40e_aqc_get_set_rss_key_data *)key;
2222
2223                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
2224                 if (ret)
2225                         PMD_INIT_LOG(ERR, "Failed to configure RSS key "
2226                                      "via AQ");
2227         } else {
2228                 uint32_t *hash_key = (uint32_t *)key;
2229                 uint16_t i;
2230
2231                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2232                         I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
2233                 I40EVF_WRITE_FLUSH(hw);
2234         }
2235
2236         return ret;
2237 }
2238
2239 static int
2240 i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
2241 {
2242         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2243         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2244         int ret;
2245
2246         if (!key || !key_len)
2247                 return -EINVAL;
2248
2249         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2250                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
2251                         (struct i40e_aqc_get_set_rss_key_data *)key);
2252                 if (ret) {
2253                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
2254                         return ret;
2255                 }
2256         } else {
2257                 uint32_t *key_dw = (uint32_t *)key;
2258                 uint16_t i;
2259
2260                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2261                         key_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
2262         }
2263         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2264
2265         return 0;
2266 }
2267
2268 static int
2269 i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
2270 {
2271         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2272         uint64_t rss_hf, hena;
2273         int ret;
2274
2275         ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
2276                                  rss_conf->rss_key_len);
2277         if (ret)
2278                 return ret;
2279
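             /* The 64-bit hash-enable field spans the two 32-bit VFQF_HENA
              * registers: clear the RSS-related bits, then set the ones that
              * correspond to the requested rss_hf flow types.
              */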
2280         rss_hf = rss_conf->rss_hf;
2281         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2282         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2283         hena &= ~I40E_RSS_HENA_ALL;
2284         hena |= i40e_config_hena(rss_hf);
2285         I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2286         I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2287         I40EVF_WRITE_FLUSH(hw);
2288
2289         return 0;
2290 }
2291
2292 static void
2293 i40evf_disable_rss(struct i40e_vf *vf)
2294 {
2295         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2296         uint64_t hena;
2297
2298         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2299         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2300         hena &= ~I40E_RSS_HENA_ALL;
2301         I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2302         I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2303         I40EVF_WRITE_FLUSH(hw);
2304 }
2305
2306 static int
2307 i40evf_config_rss(struct i40e_vf *vf)
2308 {
2309         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2310         struct rte_eth_rss_conf rss_conf;
2311         uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
2312         uint16_t num;
2313
2314         if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
2315                 i40evf_disable_rss(vf);
2316                 PMD_DRV_LOG(DEBUG, "RSS not configured");
2317                 return 0;
2318         }
2319
2320         num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
2321         /* Fill out the lookup table */
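             /* Each VFQF_HLUT register packs four 8-bit queue indices: shift
              * in one index per iteration and flush the assembled 32-bit word
              * to hardware on every fourth entry.
              */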
2322         for (i = 0, j = 0; i < nb_q; i++, j++) {
2323                 if (j >= num)
2324                         j = 0;
2325                 lut = (lut << 8) | j;
2326                 if ((i & 3) == 3)
2327                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
2328         }
2329
2330         rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
2331         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
2332                 i40evf_disable_rss(vf);
2333                 PMD_DRV_LOG(DEBUG, "No hash flag is set");
2334                 return 0;
2335         }
2336
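             /* If no key (or a too short one) was supplied, generate a random
              * default key so that RSS still spreads traffic across queues.
              */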
2337         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
2338                 (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
2339                 /* Calculate the default hash key */
2340                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2341                         rss_key_default[i] = (uint32_t)rte_rand();
2342                 rss_conf.rss_key = (uint8_t *)rss_key_default;
2343                 rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
2344                         sizeof(uint32_t);
2345         }
2346
2347         return i40evf_hw_rss_hash_set(vf, &rss_conf);
2348 }
2349
2350 static int
2351 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
2352                            struct rte_eth_rss_conf *rss_conf)
2353 {
2354         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2355         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2356         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
2357         uint64_t hena;
2358
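             /* hena reflects whether RSS is currently enabled in hardware.
              * This callback can only update the hash configuration, not
              * switch RSS on or off, so reject requests that would change
              * the enable state.
              */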
2359         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2360         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2361         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
2362                 if (rss_hf != 0) /* Enable RSS */
2363                         return -EINVAL;
2364                 return 0;
2365         }
2366
2367         /* RSS enabled */
2368         if (rss_hf == 0) /* Disable RSS */
2369                 return -EINVAL;
2370
2371         return i40evf_hw_rss_hash_set(vf, rss_conf);
2372 }
2373
2374 static int
2375 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2376                              struct rte_eth_rss_conf *rss_conf)
2377 {
2378         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2379         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2380         uint64_t hena;
2381
2382         i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
2383                            &rss_conf->rss_key_len);
2384
2385         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2386         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2387         rss_conf->rss_hf = i40e_parse_hena(hena);
2388
2389         return 0;
2390 }