e8a98c7ae7b403b0885f8959d6af549c2b00a8f0
[dpdk.git] / drivers / net / i40e / i40e_ethdev_vf.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_atomic.h>
59 #include <rte_malloc.h>
60 #include <rte_dev.h>
61
62 #include "i40e_logs.h"
63 #include "base/i40e_prototype.h"
64 #include "base/i40e_adminq_cmd.h"
65 #include "base/i40e_type.h"
66
67 #include "i40e_rxtx.h"
68 #include "i40e_ethdev.h"
69 #include "i40e_pf.h"
70 #define I40EVF_VSI_DEFAULT_MSIX_INTR     1
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
72
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT     20
/* ITR index for NOITR */
78 #define I40E_QINT_RQCTL_MSIX_INDX_NOITR     3
79
/* Bookkeeping for one message exchanged with the PF over the admin queue. */
struct i40evf_arq_msg_info {
	enum i40e_virtchnl_ops ops;     /* opcode this message belongs to */
	enum i40e_status_code result;   /* completion status reported by the PF */
	uint16_t buf_len;               /* capacity of the 'msg' buffer */
	uint16_t msg_len;               /* actual payload length read from the PF */
	uint8_t *msg;                   /* caller-supplied receive buffer */
};
87
/* Description of one virtchnl command to be sent to the PF. */
struct vf_cmd_info {
	enum i40e_virtchnl_ops ops;     /* virtchnl opcode to execute */
	uint8_t *in_args;               /* command payload (may be NULL) */
	uint32_t in_args_size;          /* payload size in bytes */
	uint8_t *out_buffer;            /* buffer that receives the PF reply */
	/* Input & output type. pass in buffer size and pass out
	 * actual return result
	 */
	uint32_t out_size;
};
98
/* Classification of what a single admin-queue read produced. */
enum i40evf_aq_result {
	I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
	I40EVF_MSG_NON,      /* Read nothing from admin queue */
	I40EVF_MSG_SYS,      /* Read system msg from admin queue */
	I40EVF_MSG_CMD,      /* Read async command result */
};
105
/* A shared buffer to store the command result from the PF driver */
107 static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
108
109 static int i40evf_dev_configure(struct rte_eth_dev *dev);
110 static int i40evf_dev_start(struct rte_eth_dev *dev);
111 static void i40evf_dev_stop(struct rte_eth_dev *dev);
112 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
113                                 struct rte_eth_dev_info *dev_info);
114 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
115                                   __rte_unused int wait_to_complete);
116 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
117                                 struct rte_eth_stats *stats);
118 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
119                                  struct rte_eth_xstats *xstats, unsigned n);
120 static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
121 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
122                                   uint16_t vlan_id, int on);
123 static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
124 static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
125                                 int on);
126 static void i40evf_dev_close(struct rte_eth_dev *dev);
127 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
128 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
129 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
130 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
131 static int i40evf_get_link_status(struct rte_eth_dev *dev,
132                                   struct rte_eth_link *link);
133 static int i40evf_init_vlan(struct rte_eth_dev *dev);
134 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
135                                      uint16_t rx_queue_id);
136 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
137                                     uint16_t rx_queue_id);
138 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
139                                      uint16_t tx_queue_id);
140 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
141                                     uint16_t tx_queue_id);
142 static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
143                         struct rte_eth_rss_reta_entry64 *reta_conf,
144                         uint16_t reta_size);
145 static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
146                         struct rte_eth_rss_reta_entry64 *reta_conf,
147                         uint16_t reta_size);
148 static int i40evf_config_rss(struct i40e_vf *vf);
149 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
150                                       struct rte_eth_rss_conf *rss_conf);
151 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
152                                         struct rte_eth_rss_conf *rss_conf);
153
/* Default hash key buffer for RSS */
/* NOTE(review): zero-initialized at file scope; presumably populated before
 * being programmed into hardware — confirm against i40evf_config_rss(). */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
156
/* Maps an xstats display name to its byte offset in struct i40e_eth_stats. */
struct rte_i40evf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE]; /* name exposed to the application */
	unsigned offset;                     /* offsetof() into i40e_eth_stats */
};
161
162 static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
163         {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
164         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
165         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
166         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
167         {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
168         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
169                 rx_unknown_protocol)},
170         {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
171         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
172         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
173         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
174         {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
175         {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
176 };
177
178 #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
179                 sizeof(rte_i40evf_stats_strings[0]))
180
/* ethdev callback table registered for the i40e VF port. */
static const struct eth_dev_ops i40evf_eth_dev_ops = {
	/* device lifecycle */
	.dev_configure        = i40evf_dev_configure,
	.dev_start            = i40evf_dev_start,
	.dev_stop             = i40evf_dev_stop,
	/* promiscuous / multicast control */
	.promiscuous_enable   = i40evf_dev_promiscuous_enable,
	.promiscuous_disable  = i40evf_dev_promiscuous_disable,
	.allmulticast_enable  = i40evf_dev_allmulticast_enable,
	.allmulticast_disable = i40evf_dev_allmulticast_disable,
	/* link and statistics */
	.link_update          = i40evf_dev_link_update,
	.stats_get            = i40evf_dev_stats_get,
	.xstats_get           = i40evf_dev_xstats_get,
	.xstats_reset         = i40evf_dev_xstats_reset,
	.dev_close            = i40evf_dev_close,
	.dev_infos_get        = i40evf_dev_info_get,
	/* VLAN offloads */
	.vlan_filter_set      = i40evf_vlan_filter_set,
	.vlan_offload_set     = i40evf_vlan_offload_set,
	.vlan_pvid_set        = i40evf_vlan_pvid_set,
	/* queue control; setup/release reuse the PF driver helpers */
	.rx_queue_start       = i40evf_dev_rx_queue_start,
	.rx_queue_stop        = i40evf_dev_rx_queue_stop,
	.tx_queue_start       = i40evf_dev_tx_queue_start,
	.tx_queue_stop        = i40evf_dev_tx_queue_stop,
	.rx_queue_setup       = i40e_dev_rx_queue_setup,
	.rx_queue_release     = i40e_dev_rx_queue_release,
	.tx_queue_setup       = i40e_dev_tx_queue_setup,
	.tx_queue_release     = i40e_dev_tx_queue_release,
	/* RSS */
	.reta_update          = i40evf_dev_rss_reta_update,
	.reta_query           = i40evf_dev_rss_reta_query,
	.rss_hash_update      = i40evf_dev_rss_hash_update,
	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
};
211
212 static int
213 i40evf_set_mac_type(struct i40e_hw *hw)
214 {
215         int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
216
217         if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
218                 switch (hw->device_id) {
219                 case I40E_DEV_ID_VF:
220                 case I40E_DEV_ID_VF_HV:
221                         hw->mac.type = I40E_MAC_VF;
222                         status = I40E_SUCCESS;
223                         break;
224                 default:
225                         ;
226                 }
227         }
228
229         return status;
230 }
231
232 /*
233  * Parse admin queue message.
234  *
235  * return value:
236  *  < 0: meet error
237  *  0: read sys msg
238  *  > 0: read cmd result
239  */
240 static enum i40evf_aq_result
241 i40evf_parse_pfmsg(struct i40e_vf *vf,
242                    struct i40e_arq_event_info *event,
243                    struct i40evf_arq_msg_info *data)
244 {
245         enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
246                         rte_le_to_cpu_32(event->desc.cookie_high);
247         enum i40e_status_code retval = (enum i40e_status_code)\
248                         rte_le_to_cpu_32(event->desc.cookie_low);
249         enum i40evf_aq_result ret = I40EVF_MSG_CMD;
250
251         /* pf sys event */
252         if (opcode == I40E_VIRTCHNL_OP_EVENT) {
253                 struct i40e_virtchnl_pf_event *vpe =
254                         (struct i40e_virtchnl_pf_event *)event->msg_buf;
255
256                 /* Initialize ret to sys event */
257                 ret = I40EVF_MSG_SYS;
258                 switch (vpe->event) {
259                 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
260                         vf->link_up =
261                                 vpe->event_data.link_event.link_status;
262                         vf->pend_msg |= PFMSG_LINK_CHANGE;
263                         PMD_DRV_LOG(INFO, "Link status update:%s",
264                                     vf->link_up ? "up" : "down");
265                         break;
266                 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
267                         vf->vf_reset = true;
268                         vf->pend_msg |= PFMSG_RESET_IMPENDING;
269                         PMD_DRV_LOG(INFO, "vf is reseting");
270                         break;
271                 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
272                         vf->dev_closed = true;
273                         vf->pend_msg |= PFMSG_DRIVER_CLOSE;
274                         PMD_DRV_LOG(INFO, "PF driver closed");
275                         break;
276                 default:
277                         PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
278                                     __func__, vpe->event);
279                 }
280         } else {
281                 /* async reply msg on command issued by vf previously */
282                 ret = I40EVF_MSG_CMD;
283                 /* Actual data length read from PF */
284                 data->msg_len = event->msg_len;
285         }
286         /* fill the ops and result to notify VF */
287         data->result = retval;
288         data->ops = opcode;
289
290         return ret;
291 }
292
293 /*
294  * Read data in admin queue to get msg from pf driver
295  */
296 static enum i40evf_aq_result
297 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
298 {
299         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
300         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
301         struct i40e_arq_event_info event;
302         int ret;
303         enum i40evf_aq_result result = I40EVF_MSG_NON;
304
305         event.buf_len = data->buf_len;
306         event.msg_buf = data->msg;
307         ret = i40e_clean_arq_element(hw, &event, NULL);
308         /* Can't read any msg from adminQ */
309         if (ret) {
310                 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
311                         result = I40EVF_MSG_NON;
312                 else
313                         result = I40EVF_MSG_ERR;
314                 return result;
315         }
316
317         /* Parse the event */
318         result = i40evf_parse_pfmsg(vf, &event, data);
319
320         return result;
321 }
322
323 /*
324  * Polling read until command result return from pf driver or meet error.
325  */
326 static int
327 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
328                      struct i40evf_arq_msg_info *data)
329 {
330         int i = 0;
331         enum i40evf_aq_result ret;
332
333 #define MAX_TRY_TIMES 20
334 #define ASQ_DELAY_MS  100
335         do {
336                 /* Delay some time first */
337                 rte_delay_ms(ASQ_DELAY_MS);
338                 ret = i40evf_read_pfmsg(dev, data);
339                 if (ret == I40EVF_MSG_CMD)
340                         return 0;
341                 else if (ret == I40EVF_MSG_ERR)
342                         return -1;
343
344                 /* If don't read msg or read sys event, continue */
345         } while(i++ < MAX_TRY_TIMES);
346
347         return -1;
348 }
349
/**
 * Release the pending-command slot.  Must only be called after a
 * successful _atomic_set_cmd() for the same command.
 */
static inline void
_clear_cmd(struct i40e_vf *vf)
{
	/* Write barrier first so all stores made on behalf of the command
	 * are visible before the slot is marked free. */
	rte_wmb();
	vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
}
360
361 /*
362  * Check there is pending cmd in execution. If none, set new command.
363  */
364 static inline int
365 _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
366 {
367         int ret = rte_atomic32_cmpset(&vf->pend_cmd,
368                         I40E_VIRTCHNL_OP_UNKNOWN, ops);
369
370         if (!ret)
371                 PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
372
373         return !ret;
374 }
375
/*
 * Send one virtchnl command to the PF and synchronously wait for the reply.
 *
 * Claims the pending-command slot, sends the message over the admin queue,
 * then polls until a command reply is read.  The slot is released on send
 * failure, on wait failure, and on a matching reply; a mismatched reply is
 * only logged.
 *
 * Returns 0 on success; otherwise the send error, -1 from the wait, or the
 * PF-reported result code (err and result are OR-ed together on return).
 */
static int
i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int err = -1;
	struct i40evf_arq_msg_info info;

	/* Only one command may be in flight at a time. */
	if (_atomic_set_cmd(vf, args->ops))
		return -1;

	/* The reply is written into the caller-provided out_buffer. */
	info.msg = args->out_buffer;
	info.buf_len = args->out_size;
	info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
	info.result = I40E_SUCCESS;

	err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
		     args->in_args, args->in_args_size, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
		_clear_cmd(vf);
		return err;
	}

	err = i40evf_wait_cmd_done(dev, &info);
	/* read message and it's expected one */
	if (!err && args->ops == info.ops)
		_clear_cmd(vf);
	else if (err) {
		PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
		_clear_cmd(vf);
	}
	else if (args->ops != info.ops)
		PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
			    args->ops, info.ops);

	return (err | info.result);
}
414
415 /*
416  * Check API version with sync wait until version read or fail from admin queue
417  */
418 static int
419 i40evf_check_api_version(struct rte_eth_dev *dev)
420 {
421         struct i40e_virtchnl_version_info version, *pver;
422         int err;
423         struct vf_cmd_info args;
424         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
425
426         version.major = I40E_VIRTCHNL_VERSION_MAJOR;
427         version.minor = I40E_VIRTCHNL_VERSION_MINOR;
428
429         args.ops = I40E_VIRTCHNL_OP_VERSION;
430         args.in_args = (uint8_t *)&version;
431         args.in_args_size = sizeof(version);
432         args.out_buffer = cmd_result_buffer;
433         args.out_size = I40E_AQ_BUF_SZ;
434
435         err = i40evf_execute_vf_cmd(dev, &args);
436         if (err) {
437                 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
438                 return err;
439         }
440
441         pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
442         vf->version_major = pver->major;
443         vf->version_minor = pver->minor;
444         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
445                 PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
446         else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
447                 (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
448                 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
449         else {
450                 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
451                                         vf->version_major, vf->version_minor,
452                                                 I40E_VIRTCHNL_VERSION_MAJOR,
453                                                 I40E_VIRTCHNL_VERSION_MINOR);
454                 return -1;
455         }
456
457         return 0;
458 }
459
460 static int
461 i40evf_get_vf_resource(struct rte_eth_dev *dev)
462 {
463         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
464         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
465         int err;
466         struct vf_cmd_info args;
467         uint32_t caps, len;
468
469         args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
470         args.out_buffer = cmd_result_buffer;
471         args.out_size = I40E_AQ_BUF_SZ;
472         if (PF_IS_V11(vf)) {
473                 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
474                        I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
475                        I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
476                        I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
477                        I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
478                 args.in_args = (uint8_t *)&caps;
479                 args.in_args_size = sizeof(caps);
480         } else {
481                 args.in_args = NULL;
482                 args.in_args_size = 0;
483         }
484         err = i40evf_execute_vf_cmd(dev, &args);
485
486         if (err) {
487                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
488                 return err;
489         }
490
491         len =  sizeof(struct i40e_virtchnl_vf_resource) +
492                 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
493
494         (void)rte_memcpy(vf->vf_res, args.out_buffer,
495                         RTE_MIN(args.out_size, len));
496         i40e_vf_parse_hw_config(hw, vf->vf_res);
497
498         return 0;
499 }
500
501 static int
502 i40evf_config_promisc(struct rte_eth_dev *dev,
503                       bool enable_unicast,
504                       bool enable_multicast)
505 {
506         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
507         int err;
508         struct vf_cmd_info args;
509         struct i40e_virtchnl_promisc_info promisc;
510
511         promisc.flags = 0;
512         promisc.vsi_id = vf->vsi_res->vsi_id;
513
514         if (enable_unicast)
515                 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
516
517         if (enable_multicast)
518                 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
519
520         args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
521         args.in_args = (uint8_t *)&promisc;
522         args.in_args_size = sizeof(promisc);
523         args.out_buffer = cmd_result_buffer;
524         args.out_size = I40E_AQ_BUF_SZ;
525
526         err = i40evf_execute_vf_cmd(dev, &args);
527
528         if (err)
529                 PMD_DRV_LOG(ERR, "fail to execute command "
530                             "CONFIG_PROMISCUOUS_MODE");
531         return err;
532 }
533
534 /* Configure vlan and double vlan offload. Use flag to specify which part to configure */
535 static int
536 i40evf_config_vlan_offload(struct rte_eth_dev *dev,
537                                 bool enable_vlan_strip)
538 {
539         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
540         int err;
541         struct vf_cmd_info args;
542         struct i40e_virtchnl_vlan_offload_info offload;
543
544         offload.vsi_id = vf->vsi_res->vsi_id;
545         offload.enable_vlan_strip = enable_vlan_strip;
546
547         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
548         args.in_args = (uint8_t *)&offload;
549         args.in_args_size = sizeof(offload);
550         args.out_buffer = cmd_result_buffer;
551         args.out_size = I40E_AQ_BUF_SZ;
552
553         err = i40evf_execute_vf_cmd(dev, &args);
554         if (err)
555                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
556
557         return err;
558 }
559
560 static int
561 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
562                                 struct i40e_vsi_vlan_pvid_info *info)
563 {
564         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
565         int err;
566         struct vf_cmd_info args;
567         struct i40e_virtchnl_pvid_info tpid_info;
568
569         if (dev == NULL || info == NULL) {
570                 PMD_DRV_LOG(ERR, "invalid parameters");
571                 return I40E_ERR_PARAM;
572         }
573
574         memset(&tpid_info, 0, sizeof(tpid_info));
575         tpid_info.vsi_id = vf->vsi_res->vsi_id;
576         (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
577
578         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
579         args.in_args = (uint8_t *)&tpid_info;
580         args.in_args_size = sizeof(tpid_info);
581         args.out_buffer = cmd_result_buffer;
582         args.out_size = I40E_AQ_BUF_SZ;
583
584         err = i40evf_execute_vf_cmd(dev, &args);
585         if (err)
586                 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
587
588         return err;
589 }
590
591 static void
592 i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
593                                   uint16_t vsi_id,
594                                   uint16_t queue_id,
595                                   uint16_t nb_txq,
596                                   struct i40e_tx_queue *txq)
597 {
598         txq_info->vsi_id = vsi_id;
599         txq_info->queue_id = queue_id;
600         if (queue_id < nb_txq) {
601                 txq_info->ring_len = txq->nb_tx_desc;
602                 txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
603         }
604 }
605
606 static void
607 i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
608                                   uint16_t vsi_id,
609                                   uint16_t queue_id,
610                                   uint16_t nb_rxq,
611                                   uint32_t max_pkt_size,
612                                   struct i40e_rx_queue *rxq)
613 {
614         rxq_info->vsi_id = vsi_id;
615         rxq_info->queue_id = queue_id;
616         rxq_info->max_pkt_size = max_pkt_size;
617         if (queue_id < nb_rxq) {
618                 rxq_info->ring_len = rxq->nb_rx_desc;
619                 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
620                 rxq_info->databuffer_size =
621                         (rte_pktmbuf_data_room_size(rxq->mp) -
622                                 RTE_PKTMBUF_HEADROOM);
623         }
624 }
625
/* It configures VSI queues to co-work with Linux PF host */
static int
i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
	struct i40e_virtchnl_queue_pair_info *vc_qpi;
	struct vf_cmd_info args;
	uint16_t i, nb_qp = vf->num_queue_pairs;
	/* VLA sized to hold the header plus one pair entry per queue pair */
	const uint32_t size =
		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
	uint8_t buff[size];
	int ret;

	memset(buff, 0, sizeof(buff));
	vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
	vc_vqci->vsi_id = vf->vsi_res->vsi_id;
	vc_vqci->num_queue_pairs = nb_qp;

	/* Fill one TX/RX descriptor pair per queue pair. */
	for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
			vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
			vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
					vf->max_pkt_len, rxq[i]);
	}
	memset(&args, 0, sizeof(args));
	args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.in_args = (uint8_t *)vc_vqci;
	args.in_args_size = size;
	args.out_buffer = cmd_result_buffer;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			"I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");

	return ret;
}
669
/* It configures VSI queues to co-work with DPDK PF host */
static int
i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
	struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
	struct vf_cmd_info args;
	uint16_t i, nb_qp = vf->num_queue_pairs;
	/* VLA sized to hold the header plus one extended entry per pair */
	const uint32_t size =
		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
	uint8_t buff[size];
	int ret;

	memset(buff, 0, sizeof(buff));
	vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
	vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
	vc_vqcei->num_queue_pairs = nb_qp;
	vc_qpei = vc_vqcei->qpair;
	for (i = 0; i < nb_qp; i++, vc_qpei++) {
		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
			vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
			vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
					vf->max_pkt_len, rxq[i]);
		if (i < dev->data->nb_rx_queues)
			/*
			 * It adds extra info for configuring VSI queues, which
			 * is needed to enable the configurable crc stripping
			 * in VF.
			 */
			vc_qpei->rxq_ext.crcstrip =
				dev->data->dev_conf.rxmode.hw_strip_crc;
	}
	memset(&args, 0, sizeof(args));
	args.ops =
		(enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
	args.in_args = (uint8_t *)vc_vqcei;
	args.in_args_size = size;
	args.out_buffer = cmd_result_buffer;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			"I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");

	return ret;
}
722
723 static int
724 i40evf_configure_queues(struct rte_eth_dev *dev)
725 {
726         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
727
728         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
729                 /* To support DPDK PF host */
730                 return i40evf_configure_vsi_queues_ext(dev);
731         else
732                 /* To support Linux PF host */
733                 return i40evf_configure_vsi_queues(dev);
734 }
735
736 static int
737 i40evf_config_irq_map(struct rte_eth_dev *dev)
738 {
739         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
740         struct vf_cmd_info args;
741         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
742                 sizeof(struct i40e_virtchnl_vector_map)];
743         struct i40e_virtchnl_irq_map_info *map_info;
744         int i, err;
745         map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
746         map_info->num_vectors = 1;
747         map_info->vecmap[0].rxitr_idx = I40E_QINT_RQCTL_MSIX_INDX_NOITR;
748         map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
749         /* Alway use default dynamic MSIX interrupt */
750         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
751                 map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
752         else
753                 map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
754
755         /* Don't map any tx queue */
756         map_info->vecmap[0].txq_map = 0;
757         map_info->vecmap[0].rxq_map = 0;
758         for (i = 0; i < dev->data->nb_rx_queues; i++)
759                 map_info->vecmap[0].rxq_map |= 1 << i;
760
761         args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
762         args.in_args = (u8 *)cmd_buffer;
763         args.in_args_size = sizeof(cmd_buffer);
764         args.out_buffer = cmd_result_buffer;
765         args.out_size = I40E_AQ_BUF_SZ;
766         err = i40evf_execute_vf_cmd(dev, &args);
767         if (err)
768                 PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
769
770         return err;
771 }
772
773 static int
774 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
775                                 bool on)
776 {
777         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
778         struct i40e_virtchnl_queue_select queue_select;
779         int err;
780         struct vf_cmd_info args;
781         memset(&queue_select, 0, sizeof(queue_select));
782         queue_select.vsi_id = vf->vsi_res->vsi_id;
783
784         if (isrx)
785                 queue_select.rx_queues |= 1 << qid;
786         else
787                 queue_select.tx_queues |= 1 << qid;
788
789         if (on)
790                 args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
791         else
792                 args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
793         args.in_args = (u8 *)&queue_select;
794         args.in_args_size = sizeof(queue_select);
795         args.out_buffer = cmd_result_buffer;
796         args.out_size = I40E_AQ_BUF_SZ;
797         err = i40evf_execute_vf_cmd(dev, &args);
798         if (err)
799                 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
800                             isrx ? "RX" : "TX", qid, on ? "on" : "off");
801
802         return err;
803 }
804
805 static int
806 i40evf_start_queues(struct rte_eth_dev *dev)
807 {
808         struct rte_eth_dev_data *dev_data = dev->data;
809         int i;
810         struct i40e_rx_queue *rxq;
811         struct i40e_tx_queue *txq;
812
813         for (i = 0; i < dev->data->nb_rx_queues; i++) {
814                 rxq = dev_data->rx_queues[i];
815                 if (rxq->rx_deferred_start)
816                         continue;
817                 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
818                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
819                         return -1;
820                 }
821         }
822
823         for (i = 0; i < dev->data->nb_tx_queues; i++) {
824                 txq = dev_data->tx_queues[i];
825                 if (txq->tx_deferred_start)
826                         continue;
827                 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
828                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
829                         return -1;
830                 }
831         }
832
833         return 0;
834 }
835
836 static int
837 i40evf_stop_queues(struct rte_eth_dev *dev)
838 {
839         int i;
840
841         /* Stop TX queues first */
842         for (i = 0; i < dev->data->nb_tx_queues; i++) {
843                 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
844                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
845                         return -1;
846                 }
847         }
848
849         /* Then stop RX queues */
850         for (i = 0; i < dev->data->nb_rx_queues; i++) {
851                 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
852                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
853                         return -1;
854                 }
855         }
856
857         return 0;
858 }
859
860 static int
861 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
862 {
863         struct i40e_virtchnl_ether_addr_list *list;
864         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
865         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
866                         sizeof(struct i40e_virtchnl_ether_addr)];
867         int err;
868         struct vf_cmd_info args;
869
870         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
871                 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
872                             addr->addr_bytes[0], addr->addr_bytes[1],
873                             addr->addr_bytes[2], addr->addr_bytes[3],
874                             addr->addr_bytes[4], addr->addr_bytes[5]);
875                 return -1;
876         }
877
878         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
879         list->vsi_id = vf->vsi_res->vsi_id;
880         list->num_elements = 1;
881         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
882                                         sizeof(addr->addr_bytes));
883
884         args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
885         args.in_args = cmd_buffer;
886         args.in_args_size = sizeof(cmd_buffer);
887         args.out_buffer = cmd_result_buffer;
888         args.out_size = I40E_AQ_BUF_SZ;
889         err = i40evf_execute_vf_cmd(dev, &args);
890         if (err)
891                 PMD_DRV_LOG(ERR, "fail to execute command "
892                             "OP_ADD_ETHER_ADDRESS");
893
894         return err;
895 }
896
897 static int
898 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
899 {
900         struct i40e_virtchnl_ether_addr_list *list;
901         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
902         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
903                         sizeof(struct i40e_virtchnl_ether_addr)];
904         int err;
905         struct vf_cmd_info args;
906
907         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
908                 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
909                             addr->addr_bytes[0], addr->addr_bytes[1],
910                             addr->addr_bytes[2], addr->addr_bytes[3],
911                             addr->addr_bytes[4], addr->addr_bytes[5]);
912                 return -1;
913         }
914
915         list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
916         list->vsi_id = vf->vsi_res->vsi_id;
917         list->num_elements = 1;
918         (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
919                         sizeof(addr->addr_bytes));
920
921         args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
922         args.in_args = cmd_buffer;
923         args.in_args_size = sizeof(cmd_buffer);
924         args.out_buffer = cmd_result_buffer;
925         args.out_size = I40E_AQ_BUF_SZ;
926         err = i40evf_execute_vf_cmd(dev, &args);
927         if (err)
928                 PMD_DRV_LOG(ERR, "fail to execute command "
929                             "OP_DEL_ETHER_ADDRESS");
930
931         return err;
932 }
933
934 static int
935 i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
936 {
937         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
938         struct i40e_virtchnl_queue_select q_stats;
939         int err;
940         struct vf_cmd_info args;
941
942         memset(&q_stats, 0, sizeof(q_stats));
943         q_stats.vsi_id = vf->vsi_res->vsi_id;
944         args.ops = I40E_VIRTCHNL_OP_GET_STATS;
945         args.in_args = (u8 *)&q_stats;
946         args.in_args_size = sizeof(q_stats);
947         args.out_buffer = cmd_result_buffer;
948         args.out_size = I40E_AQ_BUF_SZ;
949
950         err = i40evf_execute_vf_cmd(dev, &args);
951         if (err) {
952                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
953                 *pstats = NULL;
954                 return err;
955         }
956         *pstats = (struct i40e_eth_stats *)args.out_buffer;
957         return 0;
958 }
959
960 static int
961 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
962 {
963         int ret;
964         struct i40e_eth_stats *pstats = NULL;
965
966         ret = i40evf_update_stats(dev, &pstats);
967         if (ret != 0)
968                 return 0;
969
970         stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
971                                                 pstats->rx_broadcast;
972         stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
973                                                 pstats->tx_unicast;
974         stats->ierrors = pstats->rx_discards;
975         stats->oerrors = pstats->tx_errors + pstats->tx_discards;
976         stats->ibytes = pstats->rx_bytes;
977         stats->obytes = pstats->tx_bytes;
978
979         return 0;
980 }
981
/* ethdev xstats_reset callback.  The return value of i40evf_update_stats
 * is deliberately ignored: this is a best-effort reset, and a failed read
 * simply leaves the previous offsets in place.
 */
static void
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_eth_stats *pstats = NULL;

	/* read stat values to clear hardware registers */
	i40evf_update_stats(dev, &pstats);

	/* set stats offset base on current values, so subsequent reads
	 * report deltas relative to this point
	 */
	vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
}
994
995 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
996                                  struct rte_eth_xstats *xstats, unsigned n)
997 {
998         int ret;
999         unsigned i;
1000         struct i40e_eth_stats *pstats = NULL;
1001
1002         if (n < I40EVF_NB_XSTATS)
1003                 return I40EVF_NB_XSTATS;
1004
1005         ret = i40evf_update_stats(dev, &pstats);
1006         if (ret != 0)
1007                 return 0;
1008
1009         if (!xstats)
1010                 return 0;
1011
1012         /* loop over xstats array and values from pstats */
1013         for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1014                 snprintf(xstats[i].name, sizeof(xstats[i].name),
1015                          "%s", rte_i40evf_stats_strings[i].name);
1016                 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1017                         rte_i40evf_stats_strings[i].offset);
1018         }
1019
1020         return I40EVF_NB_XSTATS;
1021 }
1022
1023 static int
1024 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1025 {
1026         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1027         struct i40e_virtchnl_vlan_filter_list *vlan_list;
1028         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1029                                                         sizeof(uint16_t)];
1030         int err;
1031         struct vf_cmd_info args;
1032
1033         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1034         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1035         vlan_list->num_elements = 1;
1036         vlan_list->vlan_id[0] = vlanid;
1037
1038         args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
1039         args.in_args = (u8 *)&cmd_buffer;
1040         args.in_args_size = sizeof(cmd_buffer);
1041         args.out_buffer = cmd_result_buffer;
1042         args.out_size = I40E_AQ_BUF_SZ;
1043         err = i40evf_execute_vf_cmd(dev, &args);
1044         if (err)
1045                 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
1046
1047         return err;
1048 }
1049
1050 static int
1051 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1052 {
1053         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1054         struct i40e_virtchnl_vlan_filter_list *vlan_list;
1055         uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1056                                                         sizeof(uint16_t)];
1057         int err;
1058         struct vf_cmd_info args;
1059
1060         vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1061         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1062         vlan_list->num_elements = 1;
1063         vlan_list->vlan_id[0] = vlanid;
1064
1065         args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
1066         args.in_args = (u8 *)&cmd_buffer;
1067         args.in_args_size = sizeof(cmd_buffer);
1068         args.out_buffer = cmd_result_buffer;
1069         args.out_size = I40E_AQ_BUF_SZ;
1070         err = i40evf_execute_vf_cmd(dev, &args);
1071         if (err)
1072                 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
1073
1074         return err;
1075 }
1076
1077 static int
1078 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
1079 {
1080         int err;
1081         struct vf_cmd_info args;
1082         struct rte_eth_link *new_link;
1083
1084         args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
1085         args.in_args = NULL;
1086         args.in_args_size = 0;
1087         args.out_buffer = cmd_result_buffer;
1088         args.out_size = I40E_AQ_BUF_SZ;
1089         err = i40evf_execute_vf_cmd(dev, &args);
1090         if (err) {
1091                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
1092                 return err;
1093         }
1094
1095         new_link = (struct rte_eth_link *)args.out_buffer;
1096         (void)rte_memcpy(link, new_link, sizeof(*link));
1097
1098         return 0;
1099 }
1100
/* PCI IDs this VF PMD binds to, expanded from the shared
 * rte_pci_dev_ids.h list through the RTE_PCI_DEV_ID_DECL_I40EVF hook.
 */
static const struct rte_pci_id pci_id_i40evf_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
1106
/* Atomically publish *link into dev->data->dev_link using a 64-bit CAS.
 * NOTE(review): this type-puns struct rte_eth_link as a single uint64_t,
 * which assumes the struct occupies exactly 8 bytes — confirm against
 * the rte_eth_link definition for this DPDK version.
 * Returns -1 if the CAS lost a race with a concurrent writer, 0 on success.
 */
static inline int
i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1120
1121 static int
1122 i40evf_reset_vf(struct i40e_hw *hw)
1123 {
1124         int i, reset;
1125
1126         if (i40e_vf_reset(hw) != I40E_SUCCESS) {
1127                 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1128                 return -1;
1129         }
1130         /**
1131           * After issuing vf reset command to pf, pf won't necessarily
1132           * reset vf, it depends on what state it exactly is. If it's not
1133           * initialized yet, it won't have vf reset since it's in a certain
1134           * state. If not, it will try to reset. Even vf is reset, pf will
1135           * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
1136           * it to ACTIVE. In this duration, vf may not catch the moment that
1137           * COMPLETE is set. So, for vf, we'll try to wait a long time.
1138           */
1139         rte_delay_ms(200);
1140
1141         for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
1142                 reset = rd32(hw, I40E_VFGEN_RSTAT) &
1143                         I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1144                 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
1145                 if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
1146                         break;
1147                 else
1148                         rte_delay_ms(50);
1149         }
1150
1151         if (i >= MAX_RESET_WAIT_CNT) {
1152                 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1153                 return -1;
1154         }
1155
1156         return 0;
1157 }
1158
1159 static int
1160 i40evf_init_vf(struct rte_eth_dev *dev)
1161 {
1162         int i, err, bufsz;
1163         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1164         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1165
1166         vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1167         vf->dev_data = dev->data;
1168         err = i40evf_set_mac_type(hw);
1169         if (err) {
1170                 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1171                 goto err;
1172         }
1173
1174         i40e_init_adminq_parameter(hw);
1175         err = i40e_init_adminq(hw);
1176         if (err) {
1177                 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1178                 goto err;
1179         }
1180
1181
1182         /* Reset VF and wait until it's complete */
1183         if (i40evf_reset_vf(hw)) {
1184                 PMD_INIT_LOG(ERR, "reset NIC failed");
1185                 goto err_aq;
1186         }
1187
1188         /* VF reset, shutdown admin queue and initialize again */
1189         if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1190                 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1191                 return -1;
1192         }
1193
1194         i40e_init_adminq_parameter(hw);
1195         if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1196                 PMD_INIT_LOG(ERR, "init_adminq failed");
1197                 return -1;
1198         }
1199         if (i40evf_check_api_version(dev) != 0) {
1200                 PMD_INIT_LOG(ERR, "check_api version failed");
1201                 goto err_aq;
1202         }
1203         bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1204                 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1205         vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1206         if (!vf->vf_res) {
1207                 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1208                         goto err_aq;
1209         }
1210
1211         if (i40evf_get_vf_resource(dev) != 0) {
1212                 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1213                 goto err_alloc;
1214         }
1215
1216         /* got VF config message back from PF, now we can parse it */
1217         for (i = 0; i < vf->vf_res->num_vsis; i++) {
1218                 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
1219                         vf->vsi_res = &vf->vf_res->vsi_res[i];
1220         }
1221
1222         if (!vf->vsi_res) {
1223                 PMD_INIT_LOG(ERR, "no LAN VSI found");
1224                 goto err_alloc;
1225         }
1226
1227         vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1228         vf->vsi.type = vf->vsi_res->vsi_type;
1229         vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1230
1231         /* check mac addr, if it's not valid, genrate one */
1232         if (I40E_SUCCESS != i40e_validate_mac_addr(\
1233                         vf->vsi_res->default_mac_addr))
1234                 eth_random_addr(vf->vsi_res->default_mac_addr);
1235
1236         ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
1237                                         (struct ether_addr *)hw->mac.addr);
1238
1239         return 0;
1240
1241 err_alloc:
1242         rte_free(vf->vf_res);
1243 err_aq:
1244         i40e_shutdown_adminq(hw); /* ignore error */
1245 err:
1246         return -1;
1247 }
1248
1249 static int
1250 i40evf_uninit_vf(struct rte_eth_dev *dev)
1251 {
1252         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1253         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1254
1255         PMD_INIT_FUNC_TRACE();
1256
1257         if (hw->adapter_stopped == 0)
1258                 i40evf_dev_close(dev);
1259         rte_free(vf->vf_res);
1260         vf->vf_res = NULL;
1261
1262         return 0;
1263 }
1264
/* eth_dev_init callback: wire up the ops/burst functions, mirror the PCI
 * identity into the shared-code hw struct, initialize the VF and store
 * its MAC address.  Returns 0 on success, -1 or -ENOMEM on failure.
 */
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
			eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		i40e_set_rx_function(eth_dev);
		i40e_set_tx_function(eth_dev);
		return 0;
	}

	/* Mirror PCI identity and the BAR0 mapping into the shared-code
	 * hw struct so the i40e base driver can access the device.
	 */
	hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
	hw->device_id = eth_dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
	hw->bus.device = eth_dev->pci_dev->addr.devid;
	hw->bus.func = eth_dev->pci_dev->addr.function;
	hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
	hw->adapter_stopped = 0;

	if(i40evf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	/* NOTE(review): on this allocation failure the VF initialized above
	 * is not torn down — possible resource leak; confirm whether
	 * i40evf_uninit_vf should be called here.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
					ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
				"store MAC addresses", ETHER_ADDR_LEN);
		return -ENOMEM;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
		(struct ether_addr *)eth_dev->data->mac_addrs);

	return 0;
}
1315
/* eth_dev_uninit callback: undo i40evf_dev_init.  Primary process only. */
static int
i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	/* Teardown is restricted to the primary process. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	/* Detach ops/burst pointers before releasing resources. */
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (i40evf_uninit_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
		return -1;
	}

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
/*
 * virtual function driver struct: binds the PCI id table to the per-port
 * init/uninit callbacks and sizes the per-device private area.
 */
static struct eth_driver rte_i40evf_pmd = {
	.pci_drv = {
		.name = "rte_i40evf_pmd",
		.id_table = pci_id_i40evf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = i40evf_dev_init,
	.eth_dev_uninit = i40evf_dev_uninit,
	.dev_private_size = sizeof(struct i40e_adapter),
};
1351
1352 /*
1353  * VF Driver initialization routine.
1354  * Invoked one at EAL init time.
1355  * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
1356  */
1357 static int
1358 rte_i40evf_pmd_init(const char *name __rte_unused,
1359                     const char *params __rte_unused)
1360 {
1361         PMD_INIT_FUNC_TRACE();
1362
1363         rte_eth_driver_register(&rte_i40evf_pmd);
1364
1365         return 0;
1366 }
1367
/* EAL registration record; PMD_PDEV marks a physical-device PMD. */
static struct rte_driver rte_i40evf_driver = {
	.type = PMD_PDEV,
	.init = rte_i40evf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40evf_driver);
1374
1375 static int
1376 i40evf_dev_configure(struct rte_eth_dev *dev)
1377 {
1378         struct i40e_adapter *ad =
1379                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1380
1381         /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
1382          * allocation or vector Rx preconditions we will reset it.
1383          */
1384         ad->rx_bulk_alloc_allowed = true;
1385         ad->rx_vec_allowed = true;
1386         ad->tx_simple_allowed = true;
1387         ad->tx_vec_allowed = true;
1388
1389         return i40evf_init_vlan(dev);
1390 }
1391
1392 static int
1393 i40evf_init_vlan(struct rte_eth_dev *dev)
1394 {
1395         struct rte_eth_dev_data *data = dev->data;
1396         int ret;
1397
1398         /* Apply vlan offload setting */
1399         i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1400
1401         /* Apply pvid setting */
1402         ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1403                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
1404         return ret;
1405 }
1406
1407 static void
1408 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1409 {
1410         bool enable_vlan_strip = 0;
1411         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1412         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1413
1414         /* Linux pf host doesn't support vlan offload yet */
1415         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1416                 /* Vlan stripping setting */
1417                 if (mask & ETH_VLAN_STRIP_MASK) {
1418                         /* Enable or disable VLAN stripping */
1419                         if (dev_conf->rxmode.hw_vlan_strip)
1420                                 enable_vlan_strip = 1;
1421                         else
1422                                 enable_vlan_strip = 0;
1423
1424                         i40evf_config_vlan_offload(dev, enable_vlan_strip);
1425                 }
1426         }
1427 }
1428
1429 static int
1430 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1431 {
1432         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1433         struct i40e_vsi_vlan_pvid_info info;
1434         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1435
1436         memset(&info, 0, sizeof(info));
1437         info.on = on;
1438
1439         /* Linux pf host don't support vlan offload yet */
1440         if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1441                 if (info.on)
1442                         info.config.pvid = pvid;
1443                 else {
1444                         info.config.reject.tagged =
1445                                 dev_conf->txmode.hw_vlan_reject_tagged;
1446                         info.config.reject.untagged =
1447                                 dev_conf->txmode.hw_vlan_reject_untagged;
1448                 }
1449                 return i40evf_config_vlan_pvid(dev, &info);
1450         }
1451
1452         return 0;
1453 }
1454
1455 static int
1456 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1457 {
1458         struct i40e_rx_queue *rxq;
1459         int err = 0;
1460         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1461
1462         PMD_INIT_FUNC_TRACE();
1463
1464         if (rx_queue_id < dev->data->nb_rx_queues) {
1465                 rxq = dev->data->rx_queues[rx_queue_id];
1466
1467                 err = i40e_alloc_rx_queue_mbufs(rxq);
1468                 if (err) {
1469                         PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1470                         return err;
1471                 }
1472
1473                 rte_wmb();
1474
1475                 /* Init the RX tail register. */
1476                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1477                 I40EVF_WRITE_FLUSH(hw);
1478
1479                 /* Ready to switch the queue on */
1480                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1481
1482                 if (err)
1483                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1484                                     rx_queue_id);
1485         }
1486
1487         return err;
1488 }
1489
1490 static int
1491 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1492 {
1493         struct i40e_rx_queue *rxq;
1494         int err;
1495
1496         if (rx_queue_id < dev->data->nb_rx_queues) {
1497                 rxq = dev->data->rx_queues[rx_queue_id];
1498
1499                 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1500
1501                 if (err) {
1502                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1503                                     rx_queue_id);
1504                         return err;
1505                 }
1506
1507                 i40e_rx_queue_release_mbufs(rxq);
1508                 i40e_reset_rx_queue(rxq);
1509         }
1510
1511         return 0;
1512 }
1513
1514 static int
1515 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1516 {
1517         int err = 0;
1518
1519         PMD_INIT_FUNC_TRACE();
1520
1521         if (tx_queue_id < dev->data->nb_tx_queues) {
1522
1523                 /* Ready to switch the queue on */
1524                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1525
1526                 if (err)
1527                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1528                                     tx_queue_id);
1529         }
1530
1531         return err;
1532 }
1533
1534 static int
1535 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1536 {
1537         struct i40e_tx_queue *txq;
1538         int err;
1539
1540         if (tx_queue_id < dev->data->nb_tx_queues) {
1541                 txq = dev->data->tx_queues[tx_queue_id];
1542
1543                 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1544
1545                 if (err) {
1546                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1547                                     tx_queue_id);
1548                         return err;
1549                 }
1550
1551                 i40e_tx_queue_release_mbufs(txq);
1552                 i40e_reset_tx_queue(txq);
1553         }
1554
1555         return 0;
1556 }
1557
/* ethdev vlan_filter_set callback: add or remove a VLAN id filter. */
static int
i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	return on ? i40evf_add_vlan(dev, vlan_id)
		  : i40evf_del_vlan(dev, vlan_id);
}
1570
/* Program one RX queue: set up its tail register, derive the buffer and
 * maximum packet lengths from the mempool, and validate them against the
 * jumbo-frame configuration.  Enables scattered RX when a packet cannot
 * fit in a single buffer.  Returns 0 or I40E_ERR_CONFIG.
 */
static int
i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size, len;

	/* Point the queue at its tail register and prime it with the
	 * full descriptor count.
	 */
	rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	I40EVF_WRITE_FLUSH(hw);

	/* Calculate the maximum packet length allowed */
	mbp_priv = rte_mempool_get_priv(rxq->mp);
	buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
					RTE_PKTMBUF_HEADROOM);
	rxq->hs_mode = i40e_header_split_none;
	rxq->rx_hdr_len = 0;
	/* HW requires the buffer length in I40E_RXQ_CTX_DBUFF_SHIFT units */
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
	len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
	rxq->max_pkt_len = RTE_MIN(len,
		dev_data->dev_conf.rxmode.max_rx_pkt_len);

	/**
	 * Check if the jumbo frame and maximum packet length are set correctly
	 */
	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is enabled", (uint32_t)ETHER_MAX_LEN,
					(uint32_t)I40E_FRAME_SIZE_MAX);
			return I40E_ERR_CONFIG;
		}
	} else {
		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
		    rxq->max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is disabled", (uint32_t)ETHER_MIN_LEN,
						(uint32_t)ETHER_MAX_LEN);
			return I40E_ERR_CONFIG;
		}
	}

	/* Scatter if requested, or if a max-size frame (plus two VLAN
	 * tags) cannot fit in one RX buffer.
	 */
	if (dev_data->dev_conf.rxmode.enable_scatter ||
	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}

	return 0;
}
1624
1625 static int
1626 i40evf_rx_init(struct rte_eth_dev *dev)
1627 {
1628         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1629         uint16_t i;
1630         int ret = I40E_SUCCESS;
1631         struct i40e_rx_queue **rxq =
1632                 (struct i40e_rx_queue **)dev->data->rx_queues;
1633
1634         i40evf_config_rss(vf);
1635         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1636                 if (!rxq[i] || !rxq[i]->q_set)
1637                         continue;
1638                 ret = i40evf_rxq_init(dev, rxq[i]);
1639                 if (ret != I40E_SUCCESS)
1640                         break;
1641         }
1642         if (ret == I40E_SUCCESS)
1643                 i40e_set_rx_function(dev);
1644
1645         return ret;
1646 }
1647
1648 static void
1649 i40evf_tx_init(struct rte_eth_dev *dev)
1650 {
1651         uint16_t i;
1652         struct i40e_tx_queue **txq =
1653                 (struct i40e_tx_queue **)dev->data->tx_queues;
1654         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1655
1656         for (i = 0; i < dev->data->nb_tx_queues; i++)
1657                 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1658
1659         i40e_set_tx_function(dev);
1660 }
1661
/* Enable the queue interrupt for the VF. A DPDK PF host uses the
 * per-vector register VFINT_DYN_CTLN1 (default MSI-X vector), while a
 * Linux PF host uses the misc vector register VFINT_DYN_CTL01.
 */
static inline void
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		/* To support DPDK PF host */
		I40E_WRITE_REG(hw,
			I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	else
		/* To support Linux PF host */
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
				I40E_VFINT_DYN_CTL01_INTENA_MASK |
				I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
}
1680
/* Disable the queue interrupt for the VF by clearing the same dynamic
 * control register that i40evf_enable_queues_intr() sets (per-vector
 * register for a DPDK PF host, misc vector register for a Linux PF host).
 */
static inline void
i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		I40E_WRITE_REG(hw,
			I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			0);
	else
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
}
1694
/* Start the VF device: initialize Rx/Tx rings locally, then configure the
 * queues and the IRQ map through the PF, add the default MAC address,
 * start the queues, and finally enable the queue interrupt.
 * Returns 0 on success or -1 on any failure; partially acquired resources
 * are rolled back through the goto labels.
 */
static int
i40evf_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ether_addr mac_addr;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	/* A queue pair needs both directions; take the larger count */
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
					dev->data->nb_tx_queues);

	if (i40evf_rx_init(dev) != 0){
		PMD_DRV_LOG(ERR, "failed to do RX init");
		return -1;
	}

	i40evf_tx_init(dev);

	if (i40evf_configure_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}
	if (i40evf_config_irq_map(dev)) {
		PMD_DRV_LOG(ERR, "config_irq_map failed");
		goto err_queue;
	}

	/* Set mac addr */
	(void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
				sizeof(mac_addr.addr_bytes));
	if (i40evf_add_mac_addr(dev, &mac_addr)) {
		PMD_DRV_LOG(ERR, "Failed to add mac addr");
		goto err_queue;
	}

	if (i40evf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	i40evf_enable_queues_intr(dev);
	return 0;

err_mac:
	/* Undo the MAC address registration done above */
	i40evf_del_mac_addr(dev, &mac_addr);
err_queue:
	return -1;
}
1747
/* Stop the VF device: mask the queue interrupt, ask the PF to stop the
 * Rx/Tx queues, then release mbufs and reset the local queue state.
 */
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	i40evf_disable_queues_intr(dev);
	i40evf_stop_queues(dev);
	i40e_dev_clear_queues(dev);
}
1757
1758 static int
1759 i40evf_dev_link_update(struct rte_eth_dev *dev,
1760                        __rte_unused int wait_to_complete)
1761 {
1762         struct rte_eth_link new_link;
1763         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1764         /*
1765          * DPDK pf host provide interfacet to acquire link status
1766          * while Linux driver does not
1767          */
1768         if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1769                 i40evf_get_link_status(dev, &new_link);
1770         else {
1771                 /* Always assume it's up, for Linux driver PF host */
1772                 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1773                 new_link.link_speed  = ETH_LINK_SPEED_10000;
1774                 new_link.link_status = 1;
1775         }
1776         i40evf_dev_atomic_write_link_status(dev, &new_link);
1777
1778         return 0;
1779 }
1780
1781 static void
1782 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1783 {
1784         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1785         int ret;
1786
1787         /* If enabled, just return */
1788         if (vf->promisc_unicast_enabled)
1789                 return;
1790
1791         ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1792         if (ret == 0)
1793                 vf->promisc_unicast_enabled = TRUE;
1794 }
1795
1796 static void
1797 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1798 {
1799         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1800         int ret;
1801
1802         /* If disabled, just return */
1803         if (!vf->promisc_unicast_enabled)
1804                 return;
1805
1806         ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1807         if (ret == 0)
1808                 vf->promisc_unicast_enabled = FALSE;
1809 }
1810
1811 static void
1812 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1813 {
1814         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1815         int ret;
1816
1817         /* If enabled, just return */
1818         if (vf->promisc_multicast_enabled)
1819                 return;
1820
1821         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1822         if (ret == 0)
1823                 vf->promisc_multicast_enabled = TRUE;
1824 }
1825
1826 static void
1827 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1828 {
1829         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1830         int ret;
1831
1832         /* If enabled, just return */
1833         if (!vf->promisc_multicast_enabled)
1834                 return;
1835
1836         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
1837         if (ret == 0)
1838                 vf->promisc_multicast_enabled = FALSE;
1839 }
1840
/* Fill in the device capabilities reported to applications: queue counts
 * from the VSI resources granted by the PF, frame/buffer size limits,
 * RSS capabilities, offload capability flags, default queue thresholds
 * and ring descriptor limits.
 */
static void
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	/* Defaults used when the application passes a NULL rxconf */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	/* Defaults used when the application passes a NULL txconf */
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};
}
1902
1903 static void
1904 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1905 {
1906         if (i40evf_get_statics(dev, stats))
1907                 PMD_DRV_LOG(ERR, "Get statics failed");
1908 }
1909
/* Close the device: stop it, free all queue resources, reset the VF
 * through the PF and shut down the admin queue.
 */
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	i40evf_dev_stop(dev);
	hw->adapter_stopped = 1;
	i40e_dev_free_queues(dev);
	i40evf_reset_vf(hw);
	i40e_shutdown_adminq(hw);
}
1921
/* Update the 64-entry RSS redirection table. Each VFQF_HLUT register
 * packs four 8-bit queue indexes, so entries are handled four at a time;
 * a read-modify-write preserves lanes whose mask bit is clear.
 * Returns 0 on success or -EINVAL when reta_size is not 64.
 */
static int
i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut, l;
	uint16_t i, j;
	uint16_t idx, shift;
	uint8_t mask;

	if (reta_size != ETH_RSS_RETA_SIZE_64) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number of hardware can "
			"support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
		/* Locate the 4-bit slice of the caller's mask covering the
		 * four table entries that share one HLUT register.
		 */
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						I40E_4_BIT_MASK);
		if (!mask)
			continue;
		/* Skip the register read when all four lanes are replaced */
		if (mask == I40E_4_BIT_MASK)
			l = 0;
		else
			l = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));

		for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				lut |= reta_conf[idx].reta[shift + j] <<
							(CHAR_BIT * j);
			else
				/* Keep the existing byte for this lane */
				lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
		}
		I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
	}

	return 0;
}
1964
/* Read back the 64-entry RSS redirection table into reta_conf. Only
 * entries whose mask bit is set are filled in. Each VFQF_HLUT register
 * holds four 8-bit queue indexes. Returns 0 on success or -EINVAL when
 * reta_size is not 64.
 */
static int
i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut;
	uint16_t i, j;
	uint16_t idx, shift;
	uint8_t mask;

	if (reta_size != ETH_RSS_RETA_SIZE_64) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number of hardware can "
			"support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
		/* 4-bit mask slice covering the four entries of one register */
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						I40E_4_BIT_MASK);
		if (!mask)
			continue;

		lut = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
		for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				/* Extract byte j of the register value */
				reta_conf[idx].reta[shift + j] =
					((lut >> (CHAR_BIT * j)) &
						I40E_8_BIT_MASK);
		}
	}

	return 0;
}
2002
/* Program the RSS configuration into the VF registers: write the hash
 * key (only when one of sufficient length is supplied) and enable the
 * hash types requested in rss_conf->rss_hf via the 64-bit HENA register
 * pair, preserving any bits outside I40E_RSS_HENA_ALL. Always returns 0.
 */
static int
i40evf_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
{
	uint32_t *hash_key;
	uint8_t hash_key_len;
	uint64_t rss_hf, hena;

	hash_key = (uint32_t *)(rss_conf->rss_key);
	hash_key_len = rss_conf->rss_key_len;
	if (hash_key != NULL && hash_key_len >=
		(I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		uint16_t i;

		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
	}

	/* Read-modify-write the two 32-bit halves of the HENA register */
	rss_hf = rss_conf->rss_hf;
	hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf);
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
	I40EVF_WRITE_FLUSH(hw);

	return 0;
}
2031
2032 static void
2033 i40evf_disable_rss(struct i40e_vf *vf)
2034 {
2035         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2036         uint64_t hena;
2037
2038         hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
2039         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
2040         hena &= ~I40E_RSS_HENA_ALL;
2041         I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2042         I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2043         I40EVF_WRITE_FLUSH(hw);
2044 }
2045
2046 static int
2047 i40evf_config_rss(struct i40e_vf *vf)
2048 {
2049         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2050         struct rte_eth_rss_conf rss_conf;
2051         uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
2052         uint16_t num;
2053
2054         if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
2055                 i40evf_disable_rss(vf);
2056                 PMD_DRV_LOG(DEBUG, "RSS not configured\n");
2057                 return 0;
2058         }
2059
2060         num = i40e_align_floor(vf->dev_data->nb_rx_queues);
2061         /* Fill out the look up table */
2062         for (i = 0, j = 0; i < nb_q; i++, j++) {
2063                 if (j >= num)
2064                         j = 0;
2065                 lut = (lut << 8) | j;
2066                 if ((i & 3) == 3)
2067                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
2068         }
2069
2070         rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
2071         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
2072                 i40evf_disable_rss(vf);
2073                 PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
2074                 return 0;
2075         }
2076
2077         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < nb_q) {
2078                 /* Calculate the default hash key */
2079                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2080                         rss_key_default[i] = (uint32_t)rte_rand();
2081                 rss_conf.rss_key = (uint8_t *)rss_key_default;
2082                 rss_conf.rss_key_len = nb_q;
2083         }
2084
2085         return i40evf_hw_rss_hash_set(hw, &rss_conf);
2086 }
2087
/* Update the enabled RSS hash types. RSS on/off state itself cannot be
 * toggled through this callback: requesting hash types while RSS is
 * disabled, or clearing all of them while RSS is enabled, fails with
 * -EINVAL. Otherwise the key and hash types are reprogrammed.
 */
static int
i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
	uint64_t hena;

	hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
	if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
		if (rss_hf != 0) /* Enable RSS */
			return -EINVAL;
		return 0;
	}

	/* RSS enabled */
	if (rss_hf == 0) /* Disable RSS */
		return -EINVAL;

	return i40evf_hw_rss_hash_set(hw, rss_conf);
}
2110
/* Report the current RSS configuration: copy the hash key into the
 * caller's buffer when one is supplied (the buffer is assumed large
 * enough for (I40E_VFQF_HKEY_MAX_INDEX + 1) * 4 bytes — the size
 * advertised via dev_info.hash_key_size), and translate the HENA
 * register pair back into rss_hf flags. Always returns 0.
 */
static int
i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			     struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
	uint64_t hena;
	uint16_t i;

	if (hash_key) {
		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			hash_key[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
		/* i ended at HKEY_MAX_INDEX + 1, giving the key byte count */
		rss_conf->rss_key_len = i * sizeof(uint32_t);
	}
	hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
	rss_conf->rss_hf = i40e_parse_hena(hena);

	return 0;
}