i40e: adjust the number of queues for RSS
[dpdk.git] / drivers / net / i40e / i40e_ethdev_vf.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_atomic.h>
59 #include <rte_malloc.h>
60 #include <rte_dev.h>
61
62 #include "i40e_logs.h"
63 #include "base/i40e_prototype.h"
64 #include "base/i40e_adminq_cmd.h"
65 #include "base/i40e_type.h"
66
67 #include "i40e_rxtx.h"
68 #include "i40e_ethdev.h"
69 #include "i40e_pf.h"
70 #define I40EVF_VSI_DEFAULT_MSIX_INTR     1
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
72
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT     20
77 /*ITR index for NOITR*/
78 #define I40E_QINT_RQCTL_MSIX_INDX_NOITR     3
79
/* Bookkeeping for one admin-queue message exchanged with the PF driver. */
struct i40evf_arq_msg_info {
        enum i40e_virtchnl_ops ops;   /* opcode the PF's reply refers to */
        enum i40e_status_code result; /* status code reported by the PF */
        uint16_t buf_len;             /* capacity of the caller-supplied buffer */
        uint16_t msg_len;             /* actual payload length read from the PF */
        uint8_t *msg;                 /* caller-supplied buffer for the payload */
};
87
/* Descriptor for one virtchnl command sent from the VF to the PF. */
struct vf_cmd_info {
        enum i40e_virtchnl_ops ops; /* virtchnl opcode to execute */
        uint8_t *in_args;           /* command payload (may be NULL) */
        uint32_t in_args_size;      /* payload length in bytes */
        uint8_t *out_buffer;        /* buffer that receives the PF's reply */
        /* Input & output type. pass in buffer size and pass out
         * actual return result
         */
        uint32_t out_size;
};
98
/* Classification of a single admin-queue read attempt. */
enum i40evf_aq_result {
        I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
        I40EVF_MSG_NON,      /* Read nothing from admin queue */
        I40EVF_MSG_SYS,      /* Read system msg from admin queue */
        I40EVF_MSG_CMD,      /* Read async command result */
};
105
106 /* A share buffer to store the command result from PF driver */
107 static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
108
109 static int i40evf_dev_configure(struct rte_eth_dev *dev);
110 static int i40evf_dev_start(struct rte_eth_dev *dev);
111 static void i40evf_dev_stop(struct rte_eth_dev *dev);
112 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
113                                 struct rte_eth_dev_info *dev_info);
114 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
115                                   __rte_unused int wait_to_complete);
116 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
117                                 struct rte_eth_stats *stats);
118 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
119                                  struct rte_eth_xstats *xstats, unsigned n);
120 static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
121 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
122                                   uint16_t vlan_id, int on);
123 static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
124 static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
125                                 int on);
126 static void i40evf_dev_close(struct rte_eth_dev *dev);
127 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
128 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
129 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
130 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
131 static int i40evf_get_link_status(struct rte_eth_dev *dev,
132                                   struct rte_eth_link *link);
133 static int i40evf_init_vlan(struct rte_eth_dev *dev);
134 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
135                                      uint16_t rx_queue_id);
136 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
137                                     uint16_t rx_queue_id);
138 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
139                                      uint16_t tx_queue_id);
140 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
141                                     uint16_t tx_queue_id);
142 static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
143                         struct rte_eth_rss_reta_entry64 *reta_conf,
144                         uint16_t reta_size);
145 static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
146                         struct rte_eth_rss_reta_entry64 *reta_conf,
147                         uint16_t reta_size);
148 static int i40evf_config_rss(struct i40e_vf *vf);
149 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
150                                       struct rte_eth_rss_conf *rss_conf);
151 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
152                                         struct rte_eth_rss_conf *rss_conf);
153
154 /* Default hash key buffer for RSS */
155 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
156
/* Maps one extended-statistic display name to the byte offset of its
 * counter within struct i40e_eth_stats.
 */
struct rte_i40evf_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};
161
162 static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
163         {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
164         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
165         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
166         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
167         {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
168         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
169                 rx_unknown_protocol)},
170         {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
171         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
172         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
173         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
174         {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
175         {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
176 };
177
178 #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
179                 sizeof(rte_i40evf_stats_strings[0]))
180
/* VF ethdev callback table registered with the rte_ethdev layer.
 * Queue setup/release are shared with the PF driver (i40e_rxtx.c);
 * everything else is the VF-specific implementation in this file.
 */
static const struct eth_dev_ops i40evf_eth_dev_ops = {
        .dev_configure        = i40evf_dev_configure,
        .dev_start            = i40evf_dev_start,
        .dev_stop             = i40evf_dev_stop,
        .promiscuous_enable   = i40evf_dev_promiscuous_enable,
        .promiscuous_disable  = i40evf_dev_promiscuous_disable,
        .allmulticast_enable  = i40evf_dev_allmulticast_enable,
        .allmulticast_disable = i40evf_dev_allmulticast_disable,
        .link_update          = i40evf_dev_link_update,
        .stats_get            = i40evf_dev_stats_get,
        .xstats_get           = i40evf_dev_xstats_get,
        .xstats_reset         = i40evf_dev_xstats_reset,
        .dev_close            = i40evf_dev_close,
        .dev_infos_get        = i40evf_dev_info_get,
        .vlan_filter_set      = i40evf_vlan_filter_set,
        .vlan_offload_set     = i40evf_vlan_offload_set,
        .vlan_pvid_set        = i40evf_vlan_pvid_set,
        .rx_queue_start       = i40evf_dev_rx_queue_start,
        .rx_queue_stop        = i40evf_dev_rx_queue_stop,
        .tx_queue_start       = i40evf_dev_tx_queue_start,
        .tx_queue_stop        = i40evf_dev_tx_queue_stop,
        .rx_queue_setup       = i40e_dev_rx_queue_setup,
        .rx_queue_release     = i40e_dev_rx_queue_release,
        .tx_queue_setup       = i40e_dev_tx_queue_setup,
        .tx_queue_release     = i40e_dev_tx_queue_release,
        .reta_update          = i40evf_dev_rss_reta_update,
        .reta_query           = i40evf_dev_rss_reta_query,
        .rss_hash_update      = i40evf_dev_rss_hash_update,
        .rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
};
211
212 static int
213 i40evf_set_mac_type(struct i40e_hw *hw)
214 {
215         int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
216
217         if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
218                 switch (hw->device_id) {
219                 case I40E_DEV_ID_VF:
220                 case I40E_DEV_ID_VF_HV:
221                         hw->mac.type = I40E_MAC_VF;
222                         status = I40E_SUCCESS;
223                         break;
224                 default:
225                         ;
226                 }
227         }
228
229         return status;
230 }
231
232 /*
233  * Parse admin queue message.
234  *
235  * return value:
236  *  < 0: meet error
237  *  0: read sys msg
238  *  > 0: read cmd result
239  */
240 static enum i40evf_aq_result
241 i40evf_parse_pfmsg(struct i40e_vf *vf,
242                    struct i40e_arq_event_info *event,
243                    struct i40evf_arq_msg_info *data)
244 {
245         enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
246                         rte_le_to_cpu_32(event->desc.cookie_high);
247         enum i40e_status_code retval = (enum i40e_status_code)\
248                         rte_le_to_cpu_32(event->desc.cookie_low);
249         enum i40evf_aq_result ret = I40EVF_MSG_CMD;
250
251         /* pf sys event */
252         if (opcode == I40E_VIRTCHNL_OP_EVENT) {
253                 struct i40e_virtchnl_pf_event *vpe =
254                         (struct i40e_virtchnl_pf_event *)event->msg_buf;
255
256                 /* Initialize ret to sys event */
257                 ret = I40EVF_MSG_SYS;
258                 switch (vpe->event) {
259                 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
260                         vf->link_up =
261                                 vpe->event_data.link_event.link_status;
262                         vf->pend_msg |= PFMSG_LINK_CHANGE;
263                         PMD_DRV_LOG(INFO, "Link status update:%s",
264                                     vf->link_up ? "up" : "down");
265                         break;
266                 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
267                         vf->vf_reset = true;
268                         vf->pend_msg |= PFMSG_RESET_IMPENDING;
269                         PMD_DRV_LOG(INFO, "vf is reseting");
270                         break;
271                 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
272                         vf->dev_closed = true;
273                         vf->pend_msg |= PFMSG_DRIVER_CLOSE;
274                         PMD_DRV_LOG(INFO, "PF driver closed");
275                         break;
276                 default:
277                         PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
278                                     __func__, vpe->event);
279                 }
280         } else {
281                 /* async reply msg on command issued by vf previously */
282                 ret = I40EVF_MSG_CMD;
283                 /* Actual data length read from PF */
284                 data->msg_len = event->msg_len;
285         }
286         /* fill the ops and result to notify VF */
287         data->result = retval;
288         data->ops = opcode;
289
290         return ret;
291 }
292
293 /*
294  * Read data in admin queue to get msg from pf driver
295  */
296 static enum i40evf_aq_result
297 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
298 {
299         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
300         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
301         struct i40e_arq_event_info event;
302         int ret;
303         enum i40evf_aq_result result = I40EVF_MSG_NON;
304
305         event.buf_len = data->buf_len;
306         event.msg_buf = data->msg;
307         ret = i40e_clean_arq_element(hw, &event, NULL);
308         /* Can't read any msg from adminQ */
309         if (ret) {
310                 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
311                         result = I40EVF_MSG_NON;
312                 else
313                         result = I40EVF_MSG_ERR;
314                 return result;
315         }
316
317         /* Parse the event */
318         result = i40evf_parse_pfmsg(vf, &event, data);
319
320         return result;
321 }
322
323 /*
324  * Polling read until command result return from pf driver or meet error.
325  */
326 static int
327 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
328                      struct i40evf_arq_msg_info *data)
329 {
330         int i = 0;
331         enum i40evf_aq_result ret;
332
333 #define MAX_TRY_TIMES 20
334 #define ASQ_DELAY_MS  100
335         do {
336                 /* Delay some time first */
337                 rte_delay_ms(ASQ_DELAY_MS);
338                 ret = i40evf_read_pfmsg(dev, data);
339                 if (ret == I40EVF_MSG_CMD)
340                         return 0;
341                 else if (ret == I40EVF_MSG_ERR)
342                         return -1;
343
344                 /* If don't read msg or read sys event, continue */
345         } while(i++ < MAX_TRY_TIMES);
346
347         return -1;
348 }
349
/**
 * clear current command. Only call in case execute
 * _atomic_set_cmd successfully.
 */
static inline void
_clear_cmd(struct i40e_vf *vf)
{
        /* Make earlier stores visible before releasing the pending slot. */
        rte_wmb();
        vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
}
360
361 /*
362  * Check there is pending cmd in execution. If none, set new command.
363  */
364 static inline int
365 _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
366 {
367         int ret = rte_atomic32_cmpset(&vf->pend_cmd,
368                         I40E_VIRTCHNL_OP_UNKNOWN, ops);
369
370         if (!ret)
371                 PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
372
373         return !ret;
374 }
375
/* Send one virtchnl command to the PF and synchronously wait for its reply.
 * Returns 0 on success; otherwise the send/wait error ORed with the status
 * code the PF reported in the reply.
 */
static int
i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int err = -1;
        struct i40evf_arq_msg_info info;

        /* Claim the single outstanding-command slot; bail if one is pending */
        if (_atomic_set_cmd(vf, args->ops))
                return -1;

        /* Point the reply at the caller's output buffer */
        info.msg = args->out_buffer;
        info.buf_len = args->out_size;
        info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
        info.result = I40E_SUCCESS;

        err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
                     args->in_args, args->in_args_size, NULL);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
                _clear_cmd(vf);
                return err;
        }

        err = i40evf_wait_cmd_done(dev, &info);
        /* read message and it's expected one */
        if (!err && args->ops == info.ops)
                _clear_cmd(vf);
        else if (err) {
                PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
                _clear_cmd(vf);
        }
        else if (args->ops != info.ops)
                /* NOTE(review): on opcode mismatch the pending-command slot is
                 * left set — presumably so the real reply can still be
                 * matched; confirm before changing this path. */
                PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
                            args->ops, info.ops);

        return (err | info.result);
}
414
415 /*
416  * Check API version with sync wait until version read or fail from admin queue
417  */
418 static int
419 i40evf_check_api_version(struct rte_eth_dev *dev)
420 {
421         struct i40e_virtchnl_version_info version, *pver;
422         int err;
423         struct vf_cmd_info args;
424         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
425
426         version.major = I40E_VIRTCHNL_VERSION_MAJOR;
427         version.minor = I40E_VIRTCHNL_VERSION_MINOR;
428
429         args.ops = I40E_VIRTCHNL_OP_VERSION;
430         args.in_args = (uint8_t *)&version;
431         args.in_args_size = sizeof(version);
432         args.out_buffer = cmd_result_buffer;
433         args.out_size = I40E_AQ_BUF_SZ;
434
435         err = i40evf_execute_vf_cmd(dev, &args);
436         if (err) {
437                 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
438                 return err;
439         }
440
441         pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
442         vf->version_major = pver->major;
443         vf->version_minor = pver->minor;
444         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
445                 PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
446         else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
447                 (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
448                 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
449         else {
450                 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
451                                         vf->version_major, vf->version_minor,
452                                                 I40E_VIRTCHNL_VERSION_MAJOR,
453                                                 I40E_VIRTCHNL_VERSION_MINOR);
454                 return -1;
455         }
456
457         return 0;
458 }
459
460 static int
461 i40evf_get_vf_resource(struct rte_eth_dev *dev)
462 {
463         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
464         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
465         int err;
466         struct vf_cmd_info args;
467         uint32_t caps, len;
468
469         args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
470         args.out_buffer = cmd_result_buffer;
471         args.out_size = I40E_AQ_BUF_SZ;
472         if (PF_IS_V11(vf)) {
473                 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
474                        I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
475                        I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
476                        I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
477                        I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
478                 args.in_args = (uint8_t *)&caps;
479                 args.in_args_size = sizeof(caps);
480         } else {
481                 args.in_args = NULL;
482                 args.in_args_size = 0;
483         }
484         err = i40evf_execute_vf_cmd(dev, &args);
485
486         if (err) {
487                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
488                 return err;
489         }
490
491         len =  sizeof(struct i40e_virtchnl_vf_resource) +
492                 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
493
494         (void)rte_memcpy(vf->vf_res, args.out_buffer,
495                         RTE_MIN(args.out_size, len));
496         i40e_vf_parse_hw_config(hw, vf->vf_res);
497
498         return 0;
499 }
500
501 static int
502 i40evf_config_promisc(struct rte_eth_dev *dev,
503                       bool enable_unicast,
504                       bool enable_multicast)
505 {
506         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
507         int err;
508         struct vf_cmd_info args;
509         struct i40e_virtchnl_promisc_info promisc;
510
511         promisc.flags = 0;
512         promisc.vsi_id = vf->vsi_res->vsi_id;
513
514         if (enable_unicast)
515                 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
516
517         if (enable_multicast)
518                 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
519
520         args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
521         args.in_args = (uint8_t *)&promisc;
522         args.in_args_size = sizeof(promisc);
523         args.out_buffer = cmd_result_buffer;
524         args.out_size = I40E_AQ_BUF_SZ;
525
526         err = i40evf_execute_vf_cmd(dev, &args);
527
528         if (err)
529                 PMD_DRV_LOG(ERR, "fail to execute command "
530                             "CONFIG_PROMISCUOUS_MODE");
531         return err;
532 }
533
534 /* Configure vlan and double vlan offload. Use flag to specify which part to configure */
535 static int
536 i40evf_config_vlan_offload(struct rte_eth_dev *dev,
537                                 bool enable_vlan_strip)
538 {
539         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
540         int err;
541         struct vf_cmd_info args;
542         struct i40e_virtchnl_vlan_offload_info offload;
543
544         offload.vsi_id = vf->vsi_res->vsi_id;
545         offload.enable_vlan_strip = enable_vlan_strip;
546
547         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
548         args.in_args = (uint8_t *)&offload;
549         args.in_args_size = sizeof(offload);
550         args.out_buffer = cmd_result_buffer;
551         args.out_size = I40E_AQ_BUF_SZ;
552
553         err = i40evf_execute_vf_cmd(dev, &args);
554         if (err)
555                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
556
557         return err;
558 }
559
560 static int
561 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
562                                 struct i40e_vsi_vlan_pvid_info *info)
563 {
564         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
565         int err;
566         struct vf_cmd_info args;
567         struct i40e_virtchnl_pvid_info tpid_info;
568
569         if (dev == NULL || info == NULL) {
570                 PMD_DRV_LOG(ERR, "invalid parameters");
571                 return I40E_ERR_PARAM;
572         }
573
574         memset(&tpid_info, 0, sizeof(tpid_info));
575         tpid_info.vsi_id = vf->vsi_res->vsi_id;
576         (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
577
578         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
579         args.in_args = (uint8_t *)&tpid_info;
580         args.in_args_size = sizeof(tpid_info);
581         args.out_buffer = cmd_result_buffer;
582         args.out_size = I40E_AQ_BUF_SZ;
583
584         err = i40evf_execute_vf_cmd(dev, &args);
585         if (err)
586                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
587
588         return err;
589 }
590
591 static void
592 i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
593                                   uint16_t vsi_id,
594                                   uint16_t queue_id,
595                                   uint16_t nb_txq,
596                                   struct i40e_tx_queue *txq)
597 {
598         txq_info->vsi_id = vsi_id;
599         txq_info->queue_id = queue_id;
600         if (queue_id < nb_txq) {
601                 txq_info->ring_len = txq->nb_tx_desc;
602                 txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
603         }
604 }
605
606 static void
607 i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
608                                   uint16_t vsi_id,
609                                   uint16_t queue_id,
610                                   uint16_t nb_rxq,
611                                   uint32_t max_pkt_size,
612                                   struct i40e_rx_queue *rxq)
613 {
614         rxq_info->vsi_id = vsi_id;
615         rxq_info->queue_id = queue_id;
616         rxq_info->max_pkt_size = max_pkt_size;
617         if (queue_id < nb_rxq) {
618                 rxq_info->ring_len = rxq->nb_rx_desc;
619                 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
620                 rxq_info->databuffer_size =
621                         (rte_pktmbuf_data_room_size(rxq->mp) -
622                                 RTE_PKTMBUF_HEADROOM);
623         }
624 }
625
626 /* It configures VSI queues to co-work with Linux PF host */
627 static int
628 i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
629 {
630         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
631         struct i40e_rx_queue **rxq =
632                 (struct i40e_rx_queue **)dev->data->rx_queues;
633         struct i40e_tx_queue **txq =
634                 (struct i40e_tx_queue **)dev->data->tx_queues;
635         struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
636         struct i40e_virtchnl_queue_pair_info *vc_qpi;
637         struct vf_cmd_info args;
638         uint16_t i, nb_qp = vf->num_queue_pairs;
639         const uint32_t size =
640                 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
641         uint8_t buff[size];
642         int ret;
643
644         memset(buff, 0, sizeof(buff));
645         vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
646         vc_vqci->vsi_id = vf->vsi_res->vsi_id;
647         vc_vqci->num_queue_pairs = nb_qp;
648
649         for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
650                 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
651                         vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
652                 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
653                         vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
654                                         vf->max_pkt_len, rxq[i]);
655         }
656         memset(&args, 0, sizeof(args));
657         args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
658         args.in_args = (uint8_t *)vc_vqci;
659         args.in_args_size = size;
660         args.out_buffer = cmd_result_buffer;
661         args.out_size = I40E_AQ_BUF_SZ;
662         ret = i40evf_execute_vf_cmd(dev, &args);
663         if (ret)
664                 PMD_DRV_LOG(ERR, "Failed to execute command of "
665                         "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
666
667         return ret;
668 }
669
/* It configures VSI queues to co-work with DPDK PF host.
 * Same as i40evf_configure_vsi_queues() but uses the extended queue-pair
 * layout so per-queue extras (currently CRC-strip) can be passed along.
 */
static int
i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_rx_queue **rxq =
                (struct i40e_rx_queue **)dev->data->rx_queues;
        struct i40e_tx_queue **txq =
                (struct i40e_tx_queue **)dev->data->tx_queues;
        struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
        struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
        struct vf_cmd_info args;
        uint16_t i, nb_qp = vf->num_queue_pairs;
        /* Message length depends on the number of queue pairs (VLA below) */
        const uint32_t size =
                I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
        uint8_t buff[size];
        int ret;

        memset(buff, 0, sizeof(buff));
        vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
        vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
        vc_vqcei->num_queue_pairs = nb_qp;
        vc_qpei = vc_vqcei->qpair;
        for (i = 0; i < nb_qp; i++, vc_qpei++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
                        vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
                        vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
                                        vf->max_pkt_len, rxq[i]);
                if (i < dev->data->nb_rx_queues)
                        /*
                         * It adds extra info for configuring VSI queues, which
                         * is needed to enable the configurable crc stripping
                         * in VF.
                         */
                        vc_qpei->rxq_ext.crcstrip =
                                dev->data->dev_conf.rxmode.hw_strip_crc;
        }
        memset(&args, 0, sizeof(args));
        args.ops =
                (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
        args.in_args = (uint8_t *)vc_vqcei;
        args.in_args_size = size;
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        ret = i40evf_execute_vf_cmd(dev, &args);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to execute command of "
                        "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");

        return ret;
}
722
723 static int
724 i40evf_configure_queues(struct rte_eth_dev *dev)
725 {
726         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
727
728         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
729                 /* To support DPDK PF host */
730                 return i40evf_configure_vsi_queues_ext(dev);
731         else
732                 /* To support Linux PF host */
733                 return i40evf_configure_vsi_queues(dev);
734 }
735
736 static int
737 i40evf_config_irq_map(struct rte_eth_dev *dev)
738 {
739         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
740         struct vf_cmd_info args;
741         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
742                 sizeof(struct i40e_virtchnl_vector_map)];
743         struct i40e_virtchnl_irq_map_info *map_info;
744         int i, err;
745         map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
746         map_info->num_vectors = 1;
747         map_info->vecmap[0].rxitr_idx = I40E_QINT_RQCTL_MSIX_INDX_NOITR;
748         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
749         /* Alway use default dynamic MSIX interrupt */
750         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
751                 map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
752         else
753                 map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
754
755         /* Don't map any tx queue */
756         map_info->vecmap[0].txq_map = 0;
757         map_info->vecmap[0].rxq_map = 0;
758         for (i = 0; i < dev->data->nb_rx_queues; i++)
759                 map_info->vecmap[0].rxq_map |= 1 << i;
760
761         args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
762         args.in_args = (u8 *)cmd_buffer;
763         args.in_args_size = sizeof(cmd_buffer);
764         args.out_buffer = cmd_result_buffer;
765         args.out_size = I40E_AQ_BUF_SZ;
766         err = i40evf_execute_vf_cmd(dev, &args);
767         if (err)
768                 PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
769
770         return err;
771 }
772
773 static int
774 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
775                                 bool on)
776 {
777         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
778         struct i40e_virtchnl_queue_select queue_select;
779         int err;
780         struct vf_cmd_info args;
781         memset(&queue_select, 0, sizeof(queue_select));
782         queue_select.vsi_id = vf->vsi_res->vsi_id;
783
784         if (isrx)
785                 queue_select.rx_queues |= 1 << qid;
786         else
787                 queue_select.tx_queues |= 1 << qid;
788
789         if (on)
790                 args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
791         else
792                 args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
793         args.in_args = (u8 *)&queue_select;
794         args.in_args_size = sizeof(queue_select);
795         args.out_buffer = cmd_result_buffer;
796         args.out_size = I40E_AQ_BUF_SZ;
797         err = i40evf_execute_vf_cmd(dev, &args);
798         if (err)
799                 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
800                             isrx ? "RX" : "TX", qid, on ? "on" : "off");
801
802         return err;
803 }
804
805 static int
806 i40evf_start_queues(struct rte_eth_dev *dev)
807 {
808         struct rte_eth_dev_data *dev_data = dev->data;
809         int i;
810         struct i40e_rx_queue *rxq;
811         struct i40e_tx_queue *txq;
812
813         for (i = 0; i < dev->data->nb_rx_queues; i++) {
814                 rxq = dev_data->rx_queues[i];
815                 if (rxq->rx_deferred_start)
816                         continue;
817                 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
818                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
819                         return -1;
820                 }
821         }
822
823         for (i = 0; i < dev->data->nb_tx_queues; i++) {
824                 txq = dev_data->tx_queues[i];
825                 if (txq->tx_deferred_start)
826                         continue;
827                 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
828                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
829                         return -1;
830                 }
831         }
832
833         return 0;
834 }
835
836 static int
837 i40evf_stop_queues(struct rte_eth_dev *dev)
838 {
839         int i;
840
841         /* Stop TX queues first */
842         for (i = 0; i < dev->data->nb_tx_queues; i++) {
843                 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
844                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
845                         return -1;
846                 }
847         }
848
849         /* Then stop RX queues */
850         for (i = 0; i < dev->data->nb_rx_queues; i++) {
851                 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
852                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
853                         return -1;
854                 }
855         }
856
857         return 0;
858 }
859
860 static int
861 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
862 {
863         struct i40e_virtchnl_ether_addr_list *list;
864         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
865         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
866                         sizeof(struct i40e_virtchnl_ether_addr)];
867         int err;
868         struct vf_cmd_info args;
869
870         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
871                 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
872                             addr->addr_bytes[0], addr->addr_bytes[1],
873                             addr->addr_bytes[2], addr->addr_bytes[3],
874                             addr->addr_bytes[4], addr->addr_bytes[5]);
875                 return -1;
876         }
877
878         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
879         list->vsi_id = vf->vsi_res->vsi_id;
880         list->num_elements = 1;
881         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
882                                         sizeof(addr->addr_bytes));
883
884         args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
885         args.in_args = cmd_buffer;
886         args.in_args_size = sizeof(cmd_buffer);
887         args.out_buffer = cmd_result_buffer;
888         args.out_size = I40E_AQ_BUF_SZ;
889         err = i40evf_execute_vf_cmd(dev, &args);
890         if (err)
891                 PMD_DRV_LOG(ERR, "fail to execute command "
892                             "OP_ADD_ETHER_ADDRESS");
893
894         return err;
895 }
896
897 static int
898 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
899 {
900         struct i40e_virtchnl_ether_addr_list *list;
901         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
902         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
903                         sizeof(struct i40e_virtchnl_ether_addr)];
904         int err;
905         struct vf_cmd_info args;
906
907         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
908                 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
909                             addr->addr_bytes[0], addr->addr_bytes[1],
910                             addr->addr_bytes[2], addr->addr_bytes[3],
911                             addr->addr_bytes[4], addr->addr_bytes[5]);
912                 return -1;
913         }
914
915         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
916         list->vsi_id = vf->vsi_res->vsi_id;
917         list->num_elements = 1;
918         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
919                         sizeof(addr->addr_bytes));
920
921         args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
922         args.in_args = cmd_buffer;
923         args.in_args_size = sizeof(cmd_buffer);
924         args.out_buffer = cmd_result_buffer;
925         args.out_size = I40E_AQ_BUF_SZ;
926         err = i40evf_execute_vf_cmd(dev, &args);
927         if (err)
928                 PMD_DRV_LOG(ERR, "fail to execute command "
929                             "OP_DEL_ETHER_ADDRESS");
930
931         return err;
932 }
933
934 static int
935 i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
936 {
937         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
938         struct i40e_virtchnl_queue_select q_stats;
939         int err;
940         struct vf_cmd_info args;
941
942         memset(&q_stats, 0, sizeof(q_stats));
943         q_stats.vsi_id = vf->vsi_res->vsi_id;
944         args.ops = I40E_VIRTCHNL_OP_GET_STATS;
945         args.in_args = (u8 *)&q_stats;
946         args.in_args_size = sizeof(q_stats);
947         args.out_buffer = cmd_result_buffer;
948         args.out_size = I40E_AQ_BUF_SZ;
949
950         err = i40evf_execute_vf_cmd(dev, &args);
951         if (err) {
952                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
953                 *pstats = NULL;
954                 return err;
955         }
956         *pstats = (struct i40e_eth_stats *)args.out_buffer;
957         return 0;
958 }
959
960 static int
961 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
962 {
963         int ret;
964         struct i40e_eth_stats *pstats = NULL;
965
966         ret = i40evf_update_stats(dev, &pstats);
967         if (ret != 0)
968                 return 0;
969
970         stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
971                                                 pstats->rx_broadcast;
972         stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
973                                                 pstats->tx_unicast;
974         stats->ierrors = pstats->rx_discards;
975         stats->oerrors = pstats->tx_errors + pstats->tx_discards;
976         stats->ibytes = pstats->rx_bytes;
977         stats->obytes = pstats->tx_bytes;
978
979         return 0;
980 }
981
982 static void
983 i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
984 {
985         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
986         struct i40e_eth_stats *pstats = NULL;
987
988         /* read stat values to clear hardware registers */
989         i40evf_update_stats(dev, &pstats);
990
991         /* set stats offset base on current values */
992         vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
993 }
994
995 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
996                                  struct rte_eth_xstats *xstats, unsigned n)
997 {
998         int ret;
999         unsigned i;
1000         struct i40e_eth_stats *pstats = NULL;
1001
1002         if (n < I40EVF_NB_XSTATS)
1003                 return I40EVF_NB_XSTATS;
1004
1005         ret = i40evf_update_stats(dev, &pstats);
1006         if (ret != 0)
1007                 return 0;
1008
1009         if (!xstats)
1010                 return 0;
1011
1012         /* loop over xstats array and values from pstats */
1013         for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1014                 snprintf(xstats[i].name, sizeof(xstats[i].name),
1015                          "%s", rte_i40evf_stats_strings[i].name);
1016                 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1017                         rte_i40evf_stats_strings[i].offset);
1018         }
1019
1020         return I40EVF_NB_XSTATS;
1021 }
1022
1023 static int
1024 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1025 {
1026         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1027         struct i40e_virtchnl_vlan_filter_list *vlan_list;
1028         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1029                                                         sizeof(uint16_t)];
1030         int err;
1031         struct vf_cmd_info args;
1032
1033         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1034         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1035         vlan_list->num_elements = 1;
1036         vlan_list->vlan_id[0] = vlanid;
1037
1038         args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
1039         args.in_args = (u8 *)&cmd_buffer;
1040         args.in_args_size = sizeof(cmd_buffer);
1041         args.out_buffer = cmd_result_buffer;
1042         args.out_size = I40E_AQ_BUF_SZ;
1043         err = i40evf_execute_vf_cmd(dev, &args);
1044         if (err)
1045                 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
1046
1047         return err;
1048 }
1049
1050 static int
1051 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1052 {
1053         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1054         struct i40e_virtchnl_vlan_filter_list *vlan_list;
1055         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1056                                                         sizeof(uint16_t)];
1057         int err;
1058         struct vf_cmd_info args;
1059
1060         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1061         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1062         vlan_list->num_elements = 1;
1063         vlan_list->vlan_id[0] = vlanid;
1064
1065         args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
1066         args.in_args = (u8 *)&cmd_buffer;
1067         args.in_args_size = sizeof(cmd_buffer);
1068         args.out_buffer = cmd_result_buffer;
1069         args.out_size = I40E_AQ_BUF_SZ;
1070         err = i40evf_execute_vf_cmd(dev, &args);
1071         if (err)
1072                 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
1073
1074         return err;
1075 }
1076
1077 static int
1078 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
1079 {
1080         int err;
1081         struct vf_cmd_info args;
1082         struct rte_eth_link *new_link;
1083
1084         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
1085         args.in_args = NULL;
1086         args.in_args_size = 0;
1087         args.out_buffer = cmd_result_buffer;
1088         args.out_size = I40E_AQ_BUF_SZ;
1089         err = i40evf_execute_vf_cmd(dev, &args);
1090         if (err) {
1091                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
1092                 return err;
1093         }
1094
1095         new_link = (struct rte_eth_link *)args.out_buffer;
1096         (void)rte_memcpy(link, new_link, sizeof(*link));
1097
1098         return 0;
1099 }
1100
/* PCI device-ID table this VF driver binds to; entries are expanded
 * from rte_pci_dev_ids.h via the RTE_PCI_DEV_ID_DECL_I40EVF() x-macro,
 * terminated by a zero vendor_id sentinel.
 */
static const struct rte_pci_id pci_id_i40evf_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
1106
/* Atomically publish *link into dev->data->dev_link.
 * The whole rte_eth_link struct is treated as a single 64-bit word and
 * swapped with a compare-and-set, so readers never observe a torn
 * update. Returns 0 on success, -1 if the CAS lost a race.
 * NOTE(review): relies on sizeof(struct rte_eth_link) == 8 — the
 * standard DPDK assumption for this idiom.
 */
static inline int
i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1120
/* Issue a VF reset to the PF, then poll I40E_VFGEN_RSTAT until the PF
 * marks the reset COMPLETED/ACTIVE. Returns 0 on success, -1 on
 * failure or timeout.
 */
static int
i40evf_reset_vf(struct i40e_hw *hw)
{
	int i, reset;

	if (i40e_vf_reset(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
		return -1;
	}
	/**
	  * After issuing vf reset command to pf, pf won't necessarily
	  * reset vf, it depends on what state it exactly is. If it's not
	  * initialized yet, it won't have vf reset since it's in a certain
	  * state. If not, it will try to reset. Even vf is reset, pf will
	  * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
	  * it to ACTIVE. In this duration, vf may not catch the moment that
	  * COMPLETE is set. So, for vf, we'll try to wait a long time.
	  */
	rte_delay_ms(200);

	/* Poll the reset-state field; 50ms between reads. */
	for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
		reset = rd32(hw, I40E_VFGEN_RSTAT) &
			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
		if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
			break;
		else
			rte_delay_ms(50);
	}

	/* Loop exhausted without seeing COMPLETED/ACTIVE: timed out. */
	if (i >= MAX_RESET_WAIT_CNT) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
		return -1;
	}

	return 0;
}
1158
1159 static int
1160 i40evf_init_vf(struct rte_eth_dev *dev)
1161 {
1162         int i, err, bufsz;
1163         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1164         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1165
1166         vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1167         vf->dev_data = dev->data;
1168         err = i40evf_set_mac_type(hw);
1169         if (err) {
1170                 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1171                 goto err;
1172         }
1173
1174         i40e_init_adminq_parameter(hw);
1175         err = i40e_init_adminq(hw);
1176         if (err) {
1177                 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1178                 goto err;
1179         }
1180
1181
1182         /* Reset VF and wait until it's complete */
1183         if (i40evf_reset_vf(hw)) {
1184                 PMD_INIT_LOG(ERR, "reset NIC failed");
1185                 goto err_aq;
1186         }
1187
1188         /* VF reset, shutdown admin queue and initialize again */
1189         if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1190                 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1191                 return -1;
1192         }
1193
1194         i40e_init_adminq_parameter(hw);
1195         if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1196                 PMD_INIT_LOG(ERR, "init_adminq failed");
1197                 return -1;
1198         }
1199         if (i40evf_check_api_version(dev) != 0) {
1200                 PMD_INIT_LOG(ERR, "check_api version failed");
1201                 goto err_aq;
1202         }
1203         bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1204                 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1205         vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1206         if (!vf->vf_res) {
1207                 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1208                         goto err_aq;
1209         }
1210
1211         if (i40evf_get_vf_resource(dev) != 0) {
1212                 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1213                 goto err_alloc;
1214         }
1215
1216         /* got VF config message back from PF, now we can parse it */
1217         for (i = 0; i < vf->vf_res->num_vsis; i++) {
1218                 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
1219                         vf->vsi_res = &vf->vf_res->vsi_res[i];
1220         }
1221
1222         if (!vf->vsi_res) {
1223                 PMD_INIT_LOG(ERR, "no LAN VSI found");
1224                 goto err_alloc;
1225         }
1226
1227         if (hw->mac.type == I40E_MAC_X722_VF)
1228                 vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
1229         vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1230         vf->vsi.type = vf->vsi_res->vsi_type;
1231         vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1232         vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1233
1234         /* check mac addr, if it's not valid, genrate one */
1235         if (I40E_SUCCESS != i40e_validate_mac_addr(\
1236                         vf->vsi_res->default_mac_addr))
1237                 eth_random_addr(vf->vsi_res->default_mac_addr);
1238
1239         ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
1240                                         (struct ether_addr *)hw->mac.addr);
1241
1242         return 0;
1243
1244 err_alloc:
1245         rte_free(vf->vf_res);
1246 err_aq:
1247         i40e_shutdown_adminq(hw); /* ignore error */
1248 err:
1249         return -1;
1250 }
1251
1252 static int
1253 i40evf_uninit_vf(struct rte_eth_dev *dev)
1254 {
1255         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1256         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1257
1258         PMD_INIT_FUNC_TRACE();
1259
1260         if (hw->adapter_stopped == 0)
1261                 i40evf_dev_close(dev);
1262         rte_free(vf->vf_res);
1263         vf->vf_res = NULL;
1264
1265         return 0;
1266 }
1267
/* ethdev init hook for the VF: wires up ops/burst functions, fills the
 * i40e_hw handle from PCI info, initializes the VF and publishes its
 * MAC address. Returns 0 on success, negative on failure.
 */
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
			eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		i40e_set_rx_function(eth_dev);
		i40e_set_tx_function(eth_dev);
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);

	/* Populate the hw handle from the probed PCI device. */
	hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
	hw->device_id = eth_dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
	hw->bus.device = eth_dev->pci_dev->addr.devid;
	hw->bus.func = eth_dev->pci_dev->addr.function;
	hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
	hw->adapter_stopped = 0;

	if(i40evf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	/* NOTE(review): on this allocation failure the VF resources set up
	 * by i40evf_init_vf() (vf_res, adminq) are not released — confirm
	 * whether cleanup is needed here.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
					ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
				"store MAC addresses", ETHER_ADDR_LEN);
		return -ENOMEM;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
		(struct ether_addr *)eth_dev->data->mac_addrs);

	return 0;
}
1320
1321 static int
1322 i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
1323 {
1324         PMD_INIT_FUNC_TRACE();
1325
1326         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1327                 return -EPERM;
1328
1329         eth_dev->dev_ops = NULL;
1330         eth_dev->rx_pkt_burst = NULL;
1331         eth_dev->tx_pkt_burst = NULL;
1332
1333         if (i40evf_uninit_vf(eth_dev) != 0) {
1334                 PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
1335                 return -1;
1336         }
1337
1338         rte_free(eth_dev->data->mac_addrs);
1339         eth_dev->data->mac_addrs = NULL;
1340
1341         return 0;
1342 }
/*
 * Virtual function driver struct: binds the i40e VF PCI IDs to the
 * per-port init/uninit hooks above.
 */
static struct eth_driver rte_i40evf_pmd = {
	.pci_drv = {
		.name = "rte_i40evf_pmd",
		.id_table = pci_id_i40evf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = i40evf_dev_init,
	.eth_dev_uninit = i40evf_dev_uninit,
	.dev_private_size = sizeof(struct i40e_adapter),
};
1356
/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
 */
1362 static int
1363 rte_i40evf_pmd_init(const char *name __rte_unused,
1364                     const char *params __rte_unused)
1365 {
1366         PMD_INIT_FUNC_TRACE();
1367
1368         rte_eth_driver_register(&rte_i40evf_pmd);
1369
1370         return 0;
1371 }
1372
/* EAL driver record; PMD_REGISTER_DRIVER hooks it into EAL startup so
 * rte_i40evf_pmd_init runs during rte_eal_init().
 */
static struct rte_driver rte_i40evf_driver = {
	.type = PMD_PDEV,
	.init = rte_i40evf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40evf_driver);
1379
1380 static int
1381 i40evf_dev_configure(struct rte_eth_dev *dev)
1382 {
1383         struct i40e_adapter *ad =
1384                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1385
1386         /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
1387          * allocation or vector Rx preconditions we will reset it.
1388          */
1389         ad->rx_bulk_alloc_allowed = true;
1390         ad->rx_vec_allowed = true;
1391         ad->tx_simple_allowed = true;
1392         ad->tx_vec_allowed = true;
1393
1394         return i40evf_init_vlan(dev);
1395 }
1396
1397 static int
1398 i40evf_init_vlan(struct rte_eth_dev *dev)
1399 {
1400         struct rte_eth_dev_data *data = dev->data;
1401         int ret;
1402
1403         /* Apply vlan offload setting */
1404         i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1405
1406         /* Apply pvid setting */
1407         ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1408                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
1409         return ret;
1410 }
1411
1412 static void
1413 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1414 {
1415         bool enable_vlan_strip = 0;
1416         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1417         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1418
1419         /* Linux pf host doesn't support vlan offload yet */
1420         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1421                 /* Vlan stripping setting */
1422                 if (mask & ETH_VLAN_STRIP_MASK) {
1423                         /* Enable or disable VLAN stripping */
1424                         if (dev_conf->rxmode.hw_vlan_strip)
1425                                 enable_vlan_strip = 1;
1426                         else
1427                                 enable_vlan_strip = 0;
1428
1429                         i40evf_config_vlan_offload(dev, enable_vlan_strip);
1430                 }
1431         }
1432 }
1433
1434 static int
1435 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1436 {
1437         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1438         struct i40e_vsi_vlan_pvid_info info;
1439         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1440
1441         memset(&info, 0, sizeof(info));
1442         info.on = on;
1443
1444         /* Linux pf host don't support vlan offload yet */
1445         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1446                 if (info.on)
1447                         info.config.pvid = pvid;
1448                 else {
1449                         info.config.reject.tagged =
1450                                 dev_conf->txmode.hw_vlan_reject_tagged;
1451                         info.config.reject.untagged =
1452                                 dev_conf->txmode.hw_vlan_reject_untagged;
1453                 }
1454                 return i40evf_config_vlan_pvid(dev, &info);
1455         }
1456
1457         return 0;
1458 }
1459
1460 static int
1461 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1462 {
1463         struct i40e_rx_queue *rxq;
1464         int err = 0;
1465         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1466
1467         PMD_INIT_FUNC_TRACE();
1468
1469         if (rx_queue_id < dev->data->nb_rx_queues) {
1470                 rxq = dev->data->rx_queues[rx_queue_id];
1471
1472                 err = i40e_alloc_rx_queue_mbufs(rxq);
1473                 if (err) {
1474                         PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1475                         return err;
1476                 }
1477
1478                 rte_wmb();
1479
1480                 /* Init the RX tail register. */
1481                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1482                 I40EVF_WRITE_FLUSH(hw);
1483
1484                 /* Ready to switch the queue on */
1485                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1486
1487                 if (err)
1488                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1489                                     rx_queue_id);
1490         }
1491
1492         return err;
1493 }
1494
1495 static int
1496 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1497 {
1498         struct i40e_rx_queue *rxq;
1499         int err;
1500
1501         if (rx_queue_id < dev->data->nb_rx_queues) {
1502                 rxq = dev->data->rx_queues[rx_queue_id];
1503
1504                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1505
1506                 if (err) {
1507                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1508                                     rx_queue_id);
1509                         return err;
1510                 }
1511
1512                 i40e_rx_queue_release_mbufs(rxq);
1513                 i40e_reset_rx_queue(rxq);
1514         }
1515
1516         return 0;
1517 }
1518
1519 static int
1520 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1521 {
1522         int err = 0;
1523
1524         PMD_INIT_FUNC_TRACE();
1525
1526         if (tx_queue_id < dev->data->nb_tx_queues) {
1527
1528                 /* Ready to switch the queue on */
1529                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1530
1531                 if (err)
1532                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1533                                     tx_queue_id);
1534         }
1535
1536         return err;
1537 }
1538
1539 static int
1540 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1541 {
1542         struct i40e_tx_queue *txq;
1543         int err;
1544
1545         if (tx_queue_id < dev->data->nb_tx_queues) {
1546                 txq = dev->data->tx_queues[tx_queue_id];
1547
1548                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1549
1550                 if (err) {
1551                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1552                                     tx_queue_id);
1553                         return err;
1554                 }
1555
1556                 i40e_tx_queue_release_mbufs(txq);
1557                 i40e_reset_tx_queue(txq);
1558         }
1559
1560         return 0;
1561 }
1562
1563 static int
1564 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1565 {
1566         int ret;
1567
1568         if (on)
1569                 ret = i40evf_add_vlan(dev, vlan_id);
1570         else
1571                 ret = i40evf_del_vlan(dev,vlan_id);
1572
1573         return ret;
1574 }
1575
/* Per-queue RX hardware setup: program the tail register, derive the
 * buffer and maximum packet lengths from the mempool, validate them
 * against the jumbo-frame configuration, and enable scattered RX when
 * one buffer cannot hold a maximum-size packet.
 * Returns 0 on success or I40E_ERR_CONFIG on a bad length setup.
 */
static int
i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size, len;

	rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	I40EVF_WRITE_FLUSH(hw);

	/* Calculate the maximum packet length allowed */
	mbp_priv = rte_mempool_get_priv(rxq->mp);
	buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
					RTE_PKTMBUF_HEADROOM);
	rxq->hs_mode = i40e_header_split_none;
	rxq->rx_hdr_len = 0;
	/* Buffer length must be a multiple of 1 << I40E_RXQ_CTX_DBUFF_SHIFT */
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
	len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
	rxq->max_pkt_len = RTE_MIN(len,
		dev_data->dev_conf.rxmode.max_rx_pkt_len);

	/**
	 * Check if the jumbo frame and maximum packet length are set correctly
	 */
	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is enabled", (uint32_t)ETHER_MAX_LEN,
					(uint32_t)I40E_FRAME_SIZE_MAX);
			return I40E_ERR_CONFIG;
		}
	} else {
		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
		    rxq->max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is disabled", (uint32_t)ETHER_MIN_LEN,
						(uint32_t)ETHER_MAX_LEN);
			return I40E_ERR_CONFIG;
		}
	}

	/* Scattered RX when requested, or when a max-size frame (plus two
	 * VLAN tags) does not fit into a single mbuf data buffer.
	 */
	if (dev_data->dev_conf.rxmode.enable_scatter ||
	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}

	return 0;
}
1629
1630 static int
1631 i40evf_rx_init(struct rte_eth_dev *dev)
1632 {
1633         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1634         uint16_t i;
1635         int ret = I40E_SUCCESS;
1636         struct i40e_rx_queue **rxq =
1637                 (struct i40e_rx_queue **)dev->data->rx_queues;
1638
1639         i40evf_config_rss(vf);
1640         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1641                 if (!rxq[i] || !rxq[i]->q_set)
1642                         continue;
1643                 ret = i40evf_rxq_init(dev, rxq[i]);
1644                 if (ret != I40E_SUCCESS)
1645                         break;
1646         }
1647         if (ret == I40E_SUCCESS)
1648                 i40e_set_rx_function(dev);
1649
1650         return ret;
1651 }
1652
1653 static void
1654 i40evf_tx_init(struct rte_eth_dev *dev)
1655 {
1656         uint16_t i;
1657         struct i40e_tx_queue **txq =
1658                 (struct i40e_tx_queue **)dev->data->tx_queues;
1659         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1660
1661         for (i = 0; i < dev->data->nb_tx_queues; i++)
1662                 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1663
1664         i40e_set_tx_function(dev);
1665 }
1666
/**
 * Enable the VF queue interrupt.
 *
 * A DPDK PF host exposes the per-vector VFINT_DYN_CTLN1 registers, so the
 * default MSI-X vector is armed there (INTENA + CLEARPBA); with a Linux
 * PF host the VF can only drive the default vector via VFINT_DYN_CTL01.
 */
static inline void
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		/* To support DPDK PF host */
		I40E_WRITE_REG(hw,
			I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	else
		/* To support Linux PF host */
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
				I40E_VFINT_DYN_CTL01_INTENA_MASK |
				I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
}
1685
/**
 * Disable the VF queue interrupt by clearing the dynamic-control
 * register: per-vector VFINT_DYN_CTLN1 when the PF host is DPDK,
 * VFINT_DYN_CTL01 when the PF host is the Linux driver.
 */
static inline void
i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		I40E_WRITE_REG(hw,
			I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			0);
	else
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
}
1699
/**
 * Start the VF device.
 *
 * Sequence: init RX rings (incl. RSS) and TX tail registers locally,
 * then ask the PF host to configure the queues and the IRQ map, install
 * the default MAC address, start the queues and finally enable the queue
 * interrupt.  The order matters: queues must be configured on the host
 * before they can be started.
 *
 * @return 0 on success, -1 on any failure (the MAC filter added here is
 *         removed again if starting the queues fails).
 */
static int
i40evf_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ether_addr mac_addr;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	/* Queues come in pairs on the VF; request enough pairs to cover
	 * whichever of RX/TX needs more. */
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
					dev->data->nb_tx_queues);

	if (i40evf_rx_init(dev) != 0){
		PMD_DRV_LOG(ERR, "failed to do RX init");
		return -1;
	}

	i40evf_tx_init(dev);

	if (i40evf_configure_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}
	if (i40evf_config_irq_map(dev)) {
		PMD_DRV_LOG(ERR, "config_irq_map failed");
		goto err_queue;
	}

	/* Set mac addr */
	(void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
				sizeof(mac_addr.addr_bytes));
	if (i40evf_add_mac_addr(dev, &mac_addr)) {
		PMD_DRV_LOG(ERR, "Failed to add mac addr");
		goto err_queue;
	}

	if (i40evf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	i40evf_enable_queues_intr(dev);
	return 0;

err_mac:
	/* Roll back the MAC filter installed above */
	i40evf_del_mac_addr(dev, &mac_addr);
err_queue:
	return -1;
}
1752
/**
 * Stop the VF datapath: mask the queue interrupt, ask the PF host to
 * stop the queues, then clear the local queue state.
 */
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	i40evf_disable_queues_intr(dev);
	i40evf_stop_queues(dev);
	i40e_dev_clear_queues(dev);
}
1762
1763 static int
1764 i40evf_dev_link_update(struct rte_eth_dev *dev,
1765                        __rte_unused int wait_to_complete)
1766 {
1767         struct rte_eth_link new_link;
1768         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1769         /*
1770          * DPDK pf host provide interfacet to acquire link status
1771          * while Linux driver does not
1772          */
1773         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1774                 i40evf_get_link_status(dev, &new_link);
1775         else {
1776                 /* Always assume it's up, for Linux driver PF host */
1777                 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1778                 new_link.link_speed  = ETH_LINK_SPEED_10000;
1779                 new_link.link_status = 1;
1780         }
1781         i40evf_dev_atomic_write_link_status(dev, &new_link);
1782
1783         return 0;
1784 }
1785
1786 static void
1787 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1788 {
1789         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1790         int ret;
1791
1792         /* If enabled, just return */
1793         if (vf->promisc_unicast_enabled)
1794                 return;
1795
1796         ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1797         if (ret == 0)
1798                 vf->promisc_unicast_enabled = TRUE;
1799 }
1800
1801 static void
1802 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1803 {
1804         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1805         int ret;
1806
1807         /* If disabled, just return */
1808         if (!vf->promisc_unicast_enabled)
1809                 return;
1810
1811         ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1812         if (ret == 0)
1813                 vf->promisc_unicast_enabled = FALSE;
1814 }
1815
1816 static void
1817 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1818 {
1819         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1820         int ret;
1821
1822         /* If enabled, just return */
1823         if (vf->promisc_multicast_enabled)
1824                 return;
1825
1826         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1827         if (ret == 0)
1828                 vf->promisc_multicast_enabled = TRUE;
1829 }
1830
/**
 * Disable multicast promiscuous (allmulticast) mode on the VF.
 */
static void
i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If already disabled, just return */
	if (!vf->promisc_multicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
	if (ret == 0)
		vf->promisc_multicast_enabled = FALSE;
}
1845
/**
 * Report VF device capabilities and default RX/TX configuration.
 *
 * Queue limits come from the VSI resources assigned by the PF host; the
 * offload capabilities, default thresholds and descriptor-ring limits
 * are fixed properties of the i40e VF datapath.
 */
static void
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	/* Limits granted by the PF host for this VSI */
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	/* Defaults applied when the application passes no rxconf/txconf */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};
}
1907
1908 static void
1909 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1910 {
1911         if (i40evf_get_statics(dev, stats))
1912                 PMD_DRV_LOG(ERR, "Get statics failed");
1913 }
1914
/**
 * Close the VF device.
 *
 * Stops the datapath, frees the queues, resets the VF through the PF
 * host and finally shuts down the admin queue.  The admin queue is shut
 * down last because the reset request itself goes through it.
 */
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	i40evf_dev_stop(dev);
	hw->adapter_stopped = 1;
	i40e_dev_free_queues(dev);
	i40evf_reset_vf(hw);
	i40e_shutdown_adminq(hw);
}
1926
1927 static int
1928 i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
1929 {
1930         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
1931         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1932         int ret;
1933
1934         if (!lut)
1935                 return -EINVAL;
1936
1937         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
1938                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
1939                                           lut, lut_size);
1940                 if (ret) {
1941                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
1942                         return ret;
1943                 }
1944         } else {
1945                 uint32_t *lut_dw = (uint32_t *)lut;
1946                 uint16_t i, lut_size_dw = lut_size / 4;
1947
1948                 for (i = 0; i < lut_size_dw; i++)
1949                         lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
1950         }
1951
1952         return 0;
1953 }
1954
1955 static int
1956 i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
1957 {
1958         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
1959         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1960         int ret;
1961
1962         if (!vsi || !lut)
1963                 return -EINVAL;
1964
1965         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
1966                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
1967                                           lut, lut_size);
1968                 if (ret) {
1969                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
1970                         return ret;
1971                 }
1972         } else {
1973                 uint32_t *lut_dw = (uint32_t *)lut;
1974                 uint16_t i, lut_size_dw = lut_size / 4;
1975
1976                 for (i = 0; i < lut_size_dw; i++)
1977                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
1978                 I40EVF_WRITE_FLUSH(hw);
1979         }
1980
1981         return 0;
1982 }
1983
1984 static int
1985 i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
1986                            struct rte_eth_rss_reta_entry64 *reta_conf,
1987                            uint16_t reta_size)
1988 {
1989         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1990         uint8_t *lut;
1991         uint16_t i, idx, shift;
1992         int ret;
1993
1994         if (reta_size != ETH_RSS_RETA_SIZE_64) {
1995                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1996                         "(%d) doesn't match the number of hardware can "
1997                         "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
1998                 return -EINVAL;
1999         }
2000
2001         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2002         if (!lut) {
2003                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2004                 return -ENOMEM;
2005         }
2006         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2007         if (ret)
2008                 goto out;
2009         for (i = 0; i < reta_size; i++) {
2010                 idx = i / RTE_RETA_GROUP_SIZE;
2011                 shift = i % RTE_RETA_GROUP_SIZE;
2012                 if (reta_conf[idx].mask & (1ULL << shift))
2013                         lut[i] = reta_conf[idx].reta[shift];
2014         }
2015         ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
2016
2017 out:
2018         rte_free(lut);
2019
2020         return ret;
2021 }
2022
2023 static int
2024 i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
2025                           struct rte_eth_rss_reta_entry64 *reta_conf,
2026                           uint16_t reta_size)
2027 {
2028         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2029         uint16_t i, idx, shift;
2030         uint8_t *lut;
2031         int ret;
2032
2033         if (reta_size != ETH_RSS_RETA_SIZE_64) {
2034                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2035                         "(%d) doesn't match the number of hardware can "
2036                         "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
2037                 return -EINVAL;
2038         }
2039
2040         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2041         if (!lut) {
2042                 PMD_DRV_LOG(ERR, "No memory can be allocated");
2043                 return -ENOMEM;
2044         }
2045
2046         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2047         if (ret)
2048                 goto out;
2049         for (i = 0; i < reta_size; i++) {
2050                 idx = i / RTE_RETA_GROUP_SIZE;
2051                 shift = i % RTE_RETA_GROUP_SIZE;
2052                 if (reta_conf[idx].mask & (1ULL << shift))
2053                         reta_conf[idx].reta[shift] = lut[i];
2054         }
2055
2056 out:
2057         rte_free(lut);
2058
2059         return ret;
2060 }
2061
/**
 * Program the RSS hash key of the VSI.
 *
 * @param key      Key bytes; must be non-NULL.
 * @param key_len  Must be exactly (I40E_VFQF_HKEY_MAX_INDEX + 1) *
 *                 sizeof(uint32_t) bytes, the fixed VF key size.
 *
 * Uses the admin queue when the VF advertises AQ-based RSS, otherwise
 * writes the VFQF_HKEY registers directly.
 *
 * @return 0 on success, -EINVAL on bad arguments, or the AQ error code.
 */
static int
i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len != ((I40E_VFQF_HKEY_MAX_INDEX + 1) *
		sizeof(uint32_t)))
		return -EINVAL;

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		struct i40e_aqc_get_set_rss_key_data *key_dw =
			(struct i40e_aqc_get_set_rss_key_data *)key;

		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
		if (ret)
			PMD_INIT_LOG(ERR, "Failed to configure RSS key "
				     "via AQ");
	} else {
		uint32_t *hash_key = (uint32_t *)key;
		uint16_t i;

		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
		I40EVF_WRITE_FLUSH(hw);
	}

	return ret;
}
2092
2093 static int
2094 i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
2095 {
2096         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2097         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2098         int ret;
2099
2100         if (!key || !key_len)
2101                 return -EINVAL;
2102
2103         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2104                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
2105                         (struct i40e_aqc_get_set_rss_key_data *)key);
2106                 if (ret) {
2107                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
2108                         return ret;
2109                 }
2110         } else {
2111                 uint32_t *key_dw = (uint32_t *)key;
2112                 uint16_t i;
2113
2114                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2115                         key_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
2116         }
2117         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2118
2119         return 0;
2120 }
2121
/**
 * Apply an RSS configuration to the VF hardware.
 *
 * First programs the hash key, then rewrites the 64-bit hash-enable
 * (HENA) value: the current registers are read, all RSS-enable bits are
 * cleared, and the bits corresponding to rss_conf->rss_hf are set —
 * preserving any non-RSS bits already present.
 *
 * @return 0 on success, or the error from i40evf_set_rss_key().
 */
static int
i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	uint64_t rss_hf, hena;
	int ret;

	ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
				 rss_conf->rss_key_len);
	if (ret)
		return ret;

	/* HENA is split across two 32-bit registers */
	rss_hf = rss_conf->rss_hf;
	hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf);
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
	I40EVF_WRITE_FLUSH(hw);

	return 0;
}
2145
2146 static void
2147 i40evf_disable_rss(struct i40e_vf *vf)
2148 {
2149         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2150         uint64_t hena;
2151
2152         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2153         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2154         hena &= ~I40E_RSS_HENA_ALL;
2155         I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2156         I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2157         I40EVF_WRITE_FLUSH(hw);
2158 }
2159
2160 static int
2161 i40evf_config_rss(struct i40e_vf *vf)
2162 {
2163         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2164         struct rte_eth_rss_conf rss_conf;
2165         uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
2166         uint16_t num;
2167
2168         if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
2169                 i40evf_disable_rss(vf);
2170                 PMD_DRV_LOG(DEBUG, "RSS not configured\n");
2171                 return 0;
2172         }
2173
2174         num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
2175         /* Fill out the look up table */
2176         for (i = 0, j = 0; i < nb_q; i++, j++) {
2177                 if (j >= num)
2178                         j = 0;
2179                 lut = (lut << 8) | j;
2180                 if ((i & 3) == 3)
2181                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
2182         }
2183
2184         rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
2185         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
2186                 i40evf_disable_rss(vf);
2187                 PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
2188                 return 0;
2189         }
2190
2191         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < nb_q) {
2192                 /* Calculate the default hash key */
2193                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2194                         rss_key_default[i] = (uint32_t)rte_rand();
2195                 rss_conf.rss_key = (uint8_t *)rss_key_default;
2196                 rss_conf.rss_key_len = nb_q;
2197         }
2198
2199         return i40evf_hw_rss_hash_set(vf, &rss_conf);
2200 }
2201
2202 static int
2203 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
2204                            struct rte_eth_rss_conf *rss_conf)
2205 {
2206         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2207         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2208         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
2209         uint64_t hena;
2210
2211         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2212         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2213         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
2214                 if (rss_hf != 0) /* Enable RSS */
2215                         return -EINVAL;
2216                 return 0;
2217         }
2218
2219         /* RSS enabled */
2220         if (rss_hf == 0) /* Disable RSS */
2221                 return -EINVAL;
2222
2223         return i40evf_hw_rss_hash_set(vf, rss_conf);
2224 }
2225
2226 static int
2227 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2228                              struct rte_eth_rss_conf *rss_conf)
2229 {
2230         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2231         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2232         uint64_t hena;
2233
2234         i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
2235                            &rss_conf->rss_key_len);
2236
2237         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2238         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2239         rss_conf->rss_hf = i40e_parse_hena(hena);
2240
2241         return 0;
2242 }