/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"

#include "i40e_rxtx.h"
#include "i40e_ethdev.h"
#include "i40e_pf.h"

#define I40EVF_VSI_DEFAULT_MSIX_INTR     1
#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT     20
/* ITR index for NOITR */
#define I40E_QINT_RQCTL_MSIX_INDX_NOITR     3

struct i40evf_arq_msg_info {
        enum i40e_virtchnl_ops ops;
        enum i40e_status_code result;
        uint16_t buf_len;
        uint16_t msg_len;
        uint8_t *msg;
};

struct vf_cmd_info {
        enum i40e_virtchnl_ops ops;
        uint8_t *in_args;
        uint32_t in_args_size;
        uint8_t *out_buffer;
        /* In & out: pass in the buffer size, pass out the actual
         * length of the returned result.
         */
        uint32_t out_size;
};

enum i40evf_aq_result {
        I40EVF_MSG_ERR = -1, /* Error met while accessing the admin queue */
        I40EVF_MSG_NON,      /* Read nothing from the admin queue */
        I40EVF_MSG_SYS,      /* Read a system msg from the admin queue */
        I40EVF_MSG_CMD,      /* Read an async command result */
};

/* A shared buffer to store the command result from the PF driver */
static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];

static int i40evf_dev_configure(struct rte_eth_dev *dev);
static int i40evf_dev_start(struct rte_eth_dev *dev);
static void i40evf_dev_stop(struct rte_eth_dev *dev);
static void i40evf_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
                                  __rte_unused int wait_to_complete);
static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_xstats *xstats, unsigned n);
static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
                                  uint16_t vlan_id, int on);
static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
                                int on);
static void i40evf_dev_close(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40evf_get_link_status(struct rte_eth_dev *dev,
                                  struct rte_eth_link *link);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
                                     uint16_t rx_queue_id);
static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
                                    uint16_t rx_queue_id);
static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
                                     uint16_t tx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
                                    uint16_t tx_queue_id);
static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
                                struct ether_addr *addr,
                                uint32_t index,
                                uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int i40evf_config_rss(struct i40e_vf *vf);
static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                        struct rte_eth_rss_conf *rss_conf);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);

/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];

struct rte_i40evf_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
        {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
        {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
};

#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
                sizeof(rte_i40evf_stats_strings[0]))

static const struct eth_dev_ops i40evf_eth_dev_ops = {
        .dev_configure        = i40evf_dev_configure,
        .dev_start            = i40evf_dev_start,
        .dev_stop             = i40evf_dev_stop,
        .promiscuous_enable   = i40evf_dev_promiscuous_enable,
        .promiscuous_disable  = i40evf_dev_promiscuous_disable,
        .allmulticast_enable  = i40evf_dev_allmulticast_enable,
        .allmulticast_disable = i40evf_dev_allmulticast_disable,
        .link_update          = i40evf_dev_link_update,
        .stats_get            = i40evf_dev_stats_get,
        .xstats_get           = i40evf_dev_xstats_get,
        .xstats_reset         = i40evf_dev_xstats_reset,
        .dev_close            = i40evf_dev_close,
        .dev_infos_get        = i40evf_dev_info_get,
        .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
        .vlan_filter_set      = i40evf_vlan_filter_set,
        .vlan_offload_set     = i40evf_vlan_offload_set,
        .vlan_pvid_set        = i40evf_vlan_pvid_set,
        .rx_queue_start       = i40evf_dev_rx_queue_start,
        .rx_queue_stop        = i40evf_dev_rx_queue_stop,
        .tx_queue_start       = i40evf_dev_tx_queue_start,
        .tx_queue_stop        = i40evf_dev_tx_queue_stop,
        .rx_queue_setup       = i40e_dev_rx_queue_setup,
        .rx_queue_release     = i40e_dev_rx_queue_release,
        .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
        .rx_descriptor_done   = i40e_dev_rx_descriptor_done,
        .tx_queue_setup       = i40e_dev_tx_queue_setup,
        .tx_queue_release     = i40e_dev_tx_queue_release,
        .mac_addr_add         = i40evf_add_mac_addr,
        .mac_addr_remove      = i40evf_del_mac_addr,
        .reta_update          = i40evf_dev_rss_reta_update,
        .reta_query           = i40evf_dev_rss_reta_query,
        .rss_hash_update      = i40evf_dev_rss_hash_update,
        .rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
};

/*
 * Parse a message received from the PF driver over the admin queue.
 *
 * Return value:
 *  I40EVF_MSG_SYS: a system event was read
 *  I40EVF_MSG_CMD: an asynchronous command result was read
 */
static enum i40evf_aq_result
i40evf_parse_pfmsg(struct i40e_vf *vf,
                   struct i40e_arq_event_info *event,
                   struct i40evf_arq_msg_info *data)
{
        enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
                        rte_le_to_cpu_32(event->desc.cookie_high);
        enum i40e_status_code retval = (enum i40e_status_code)\
                        rte_le_to_cpu_32(event->desc.cookie_low);
        enum i40evf_aq_result ret = I40EVF_MSG_CMD;

        /* pf sys event */
        if (opcode == I40E_VIRTCHNL_OP_EVENT) {
                struct i40e_virtchnl_pf_event *vpe =
                        (struct i40e_virtchnl_pf_event *)event->msg_buf;

                /* Initialize ret to sys event */
                ret = I40EVF_MSG_SYS;
                switch (vpe->event) {
                case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
                        vf->link_up =
                                vpe->event_data.link_event.link_status;
                        vf->pend_msg |= PFMSG_LINK_CHANGE;
                        PMD_DRV_LOG(INFO, "Link status update:%s",
                                    vf->link_up ? "up" : "down");
                        break;
                case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
                        vf->vf_reset = true;
                        vf->pend_msg |= PFMSG_RESET_IMPENDING;
                        PMD_DRV_LOG(INFO, "VF is resetting");
                        break;
                case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
                        vf->dev_closed = true;
                        vf->pend_msg |= PFMSG_DRIVER_CLOSE;
                        PMD_DRV_LOG(INFO, "PF driver closed");
                        break;
                default:
                        PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
                                    __func__, vpe->event);
                }
        } else {
                /* async reply msg on command issued by vf previously */
                ret = I40EVF_MSG_CMD;
                /* Actual data length read from PF */
                data->msg_len = event->msg_len;
        }
        /* fill the ops and result to notify VF */
        data->result = retval;
        data->ops = opcode;

        return ret;
}

/*
 * Read data from the admin queue to get a message from the PF driver.
 */
static enum i40evf_aq_result
i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_arq_event_info event;
        int ret;
        enum i40evf_aq_result result = I40EVF_MSG_NON;

        event.buf_len = data->buf_len;
        event.msg_buf = data->msg;
        ret = i40e_clean_arq_element(hw, &event, NULL);
        /* Can't read any msg from adminQ */
        if (ret) {
                if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                        result = I40EVF_MSG_NON;
                else
                        result = I40EVF_MSG_ERR;
                return result;
        }

        /* Parse the event */
        result = i40evf_parse_pfmsg(vf, &event, data);

        return result;
}

/*
 * Poll the admin queue until a command result returns from the PF driver,
 * or until an error or a timeout occurs.
 */
static int
i40evf_wait_cmd_done(struct rte_eth_dev *dev,
                     struct i40evf_arq_msg_info *data)
{
        int i = 0;
        enum i40evf_aq_result ret;

#define MAX_TRY_TIMES 20
#define ASQ_DELAY_MS  100
        do {
                /* Delay some time first */
                rte_delay_ms(ASQ_DELAY_MS);
                ret = i40evf_read_pfmsg(dev, data);
                if (ret == I40EVF_MSG_CMD)
                        return 0;
                else if (ret == I40EVF_MSG_ERR)
                        return -1;

                /* If no msg was read or a sys event was read, keep polling */
        } while (i++ < MAX_TRY_TIMES);

        return -1;
}

/**
 * Clear the current pending command. Only call this after
 * _atomic_set_cmd has succeeded.
 */
static inline void
_clear_cmd(struct i40e_vf *vf)
{
        rte_wmb();
        vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
}

/*
 * Check whether a command is already in execution. If none is,
 * mark the new command as pending.
 */
static inline int
_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
{
        int ret = rte_atomic32_cmpset(&vf->pend_cmd,
                        I40E_VIRTCHNL_OP_UNKNOWN, ops);

        if (!ret)
                PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);

        return !ret;
}

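/*
 * Execute one virtchnl command: mark it pending, send it to the PF over
 * the admin queue, then poll the queue until the matching reply arrives
 * or polling times out. Only one command may be outstanding at a time.
 */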
static int
i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int err = -1;
        struct i40evf_arq_msg_info info;

        if (_atomic_set_cmd(vf, args->ops))
                return -1;

        info.msg = args->out_buffer;
        info.buf_len = args->out_size;
        info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
        info.result = I40E_SUCCESS;

        err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
                     args->in_args, args->in_args_size, NULL);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
                _clear_cmd(vf);
                return err;
        }

        err = i40evf_wait_cmd_done(dev, &info);
        /* message read and it is the expected one */
        if (!err && args->ops == info.ops)
                _clear_cmd(vf);
        else if (err) {
                PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
                _clear_cmd(vf);
        } else if (args->ops != info.ops)
                PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
                            args->ops, info.ops);

        return err | info.result;
}

/*
 * Check the API version with a synchronous wait until the version is
 * read from the admin queue or an error occurs.
 */
static int
i40evf_check_api_version(struct rte_eth_dev *dev)
{
        struct i40e_virtchnl_version_info version, *pver;
        int err;
        struct vf_cmd_info args;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        version.major = I40E_VIRTCHNL_VERSION_MAJOR;
        version.minor = I40E_VIRTCHNL_VERSION_MINOR;

        args.ops = I40E_VIRTCHNL_OP_VERSION;
        args.in_args = (uint8_t *)&version;
        args.in_args_size = sizeof(version);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;

        err = i40evf_execute_vf_cmd(dev, &args);
        if (err) {
                PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
                return err;
        }

        pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
        vf->version_major = pver->major;
        vf->version_minor = pver->minor;
        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
        else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
                (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
                PMD_DRV_LOG(INFO, "Peer is Linux PF host");
        else {
                PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
                             vf->version_major, vf->version_minor,
                             I40E_VIRTCHNL_VERSION_MAJOR,
                             I40E_VIRTCHNL_VERSION_MINOR);
                return -1;
        }

        return 0;
}

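/*
 * Request the VF resources (VSIs, queue pairs, offload capabilities) from
 * the PF. With a virtchnl 1.1 PF host, the VF first advertises the offload
 * capabilities it intends to use.
 */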
static int
i40evf_get_vf_resource(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int err;
        struct vf_cmd_info args;
        uint32_t caps, len;

        args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        if (PF_IS_V11(vf)) {
                caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
                       I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
                       I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
                args.in_args = (uint8_t *)&caps;
                args.in_args_size = sizeof(caps);
        } else {
                args.in_args = NULL;
                args.in_args_size = 0;
        }
        err = i40evf_execute_vf_cmd(dev, &args);

        if (err) {
                PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
                return err;
        }

        len = sizeof(struct i40e_virtchnl_vf_resource) +
                I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);

        (void)rte_memcpy(vf->vf_res, args.out_buffer,
                        RTE_MIN(args.out_size, len));
        i40e_vf_parse_hw_config(hw, vf->vf_res);

        return 0;
}

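/* Enable or disable unicast/multicast promiscuous mode through the PF */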
static int
i40evf_config_promisc(struct rte_eth_dev *dev,
                      bool enable_unicast,
                      bool enable_multicast)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int err;
        struct vf_cmd_info args;
        struct i40e_virtchnl_promisc_info promisc;

        promisc.flags = 0;
        promisc.vsi_id = vf->vsi_res->vsi_id;

        if (enable_unicast)
                promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;

        if (enable_multicast)
                promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;

        args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
        args.in_args = (uint8_t *)&promisc;
        args.in_args_size = sizeof(promisc);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;

        err = i40evf_execute_vf_cmd(dev, &args);

        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command "
                            "CONFIG_PROMISCUOUS_MODE");
        return err;
}

/* Configure VLAN stripping offload. Only supported by a DPDK PF host. */
static int
i40evf_config_vlan_offload(struct rte_eth_dev *dev,
                                bool enable_vlan_strip)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int err;
        struct vf_cmd_info args;
        struct i40e_virtchnl_vlan_offload_info offload;

        offload.vsi_id = vf->vsi_res->vsi_id;
        offload.enable_vlan_strip = enable_vlan_strip;

        args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
        args.in_args = (uint8_t *)&offload;
        args.in_args_size = sizeof(offload);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;

        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");

        return err;
}

static int
i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
                                struct i40e_vsi_vlan_pvid_info *info)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int err;
        struct vf_cmd_info args;
        struct i40e_virtchnl_pvid_info tpid_info;

        if (dev == NULL || info == NULL) {
                PMD_DRV_LOG(ERR, "invalid parameters");
                return I40E_ERR_PARAM;
        }

        memset(&tpid_info, 0, sizeof(tpid_info));
        tpid_info.vsi_id = vf->vsi_res->vsi_id;
        (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));

        args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
        args.in_args = (uint8_t *)&tpid_info;
        args.in_args_size = sizeof(tpid_info);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;

        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");

        return err;
}

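/*
 * Fill the virtchnl Tx/Rx queue info for one queue pair. Queues beyond the
 * number actually configured keep the zeroed ring length and DMA address
 * from the caller's buffer.
 */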
static void
i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
                                  uint16_t vsi_id,
                                  uint16_t queue_id,
                                  uint16_t nb_txq,
                                  struct i40e_tx_queue *txq)
{
        txq_info->vsi_id = vsi_id;
        txq_info->queue_id = queue_id;
        if (queue_id < nb_txq) {
                txq_info->ring_len = txq->nb_tx_desc;
                txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
        }
}

static void
i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
                                  uint16_t vsi_id,
                                  uint16_t queue_id,
                                  uint16_t nb_rxq,
                                  uint32_t max_pkt_size,
                                  struct i40e_rx_queue *rxq)
{
        rxq_info->vsi_id = vsi_id;
        rxq_info->queue_id = queue_id;
        rxq_info->max_pkt_size = max_pkt_size;
        if (queue_id < nb_rxq) {
                rxq_info->ring_len = rxq->nb_rx_desc;
                rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
                rxq_info->databuffer_size =
                        (rte_pktmbuf_data_room_size(rxq->mp) -
                                RTE_PKTMBUF_HEADROOM);
        }
}

/* Configure VSI queues to work with a Linux PF host */
static int
i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_rx_queue **rxq =
                (struct i40e_rx_queue **)dev->data->rx_queues;
        struct i40e_tx_queue **txq =
                (struct i40e_tx_queue **)dev->data->tx_queues;
        struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
        struct i40e_virtchnl_queue_pair_info *vc_qpi;
        struct vf_cmd_info args;
        uint16_t i, nb_qp = vf->num_queue_pairs;
        const uint32_t size =
                I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
        uint8_t buff[size];
        int ret;

        memset(buff, 0, sizeof(buff));
        vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
        vc_vqci->vsi_id = vf->vsi_res->vsi_id;
        vc_vqci->num_queue_pairs = nb_qp;

        for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
                        vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
                        vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
                                        vf->max_pkt_len, rxq[i]);
        }
        memset(&args, 0, sizeof(args));
        args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
        args.in_args = (uint8_t *)vc_vqci;
        args.in_args_size = size;
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        ret = i40evf_execute_vf_cmd(dev, &args);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to execute command of "
                        "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");

        return ret;
}

/* Configure VSI queues to work with a DPDK PF host */
static int
i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_rx_queue **rxq =
                (struct i40e_rx_queue **)dev->data->rx_queues;
        struct i40e_tx_queue **txq =
                (struct i40e_tx_queue **)dev->data->tx_queues;
        struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
        struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
        struct vf_cmd_info args;
        uint16_t i, nb_qp = vf->num_queue_pairs;
        const uint32_t size =
                I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
        uint8_t buff[size];
        int ret;

        memset(buff, 0, sizeof(buff));
        vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
        vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
        vc_vqcei->num_queue_pairs = nb_qp;
        vc_qpei = vc_vqcei->qpair;
        for (i = 0; i < nb_qp; i++, vc_qpei++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
                        vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
                        vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
                                        vf->max_pkt_len, rxq[i]);
                if (i < dev->data->nb_rx_queues)
                        /*
                         * Add extra info for configuring VSI queues, which
                         * is needed to enable configurable CRC stripping
                         * in the VF.
                         */
                        vc_qpei->rxq_ext.crcstrip =
                                dev->data->dev_conf.rxmode.hw_strip_crc;
        }
        memset(&args, 0, sizeof(args));
        args.ops =
                (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
        args.in_args = (uint8_t *)vc_vqcei;
        args.in_args_size = size;
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        ret = i40evf_execute_vf_cmd(dev, &args);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to execute command of "
                        "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");

        return ret;
}

/* Dispatch the queue configuration based on the PF host type */
static int
i40evf_configure_queues(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                /* To support DPDK PF host */
                return i40evf_configure_vsi_queues_ext(dev);
        else
                /* To support Linux PF host */
                return i40evf_configure_vsi_queues(dev);
}

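/*
 * Map all Rx queues of the VSI to a single MSI-X vector and set the ITR
 * index; Tx queues are not mapped to any vector.
 */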
static int
i40evf_config_irq_map(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct vf_cmd_info args;
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
                sizeof(struct i40e_virtchnl_vector_map)];
        struct i40e_virtchnl_irq_map_info *map_info;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        uint32_t vector_id;
        int i, err;

        if (rte_intr_allow_others(intr_handle)) {
                if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                        vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
                else
                        vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
        } else {
                vector_id = I40E_MISC_VEC_ID;
        }

        map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
        map_info->num_vectors = 1;
        map_info->vecmap[0].rxitr_idx = I40E_QINT_RQCTL_MSIX_INDX_NOITR;
        map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
        /* Always use the default dynamic MSI-X interrupt */
        map_info->vecmap[0].vector_id = vector_id;
        /* Don't map any tx queue */
        map_info->vecmap[0].txq_map = 0;
        map_info->vecmap[0].rxq_map = 0;
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                map_info->vecmap[0].rxq_map |= 1 << i;
                if (rte_intr_dp_is_en(intr_handle))
                        intr_handle->intr_vec[i] = vector_id;
        }

        args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
        args.in_args = (u8 *)cmd_buffer;
        args.in_args_size = sizeof(cmd_buffer);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

        return err;
}

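/* Enable or disable a single Rx or Tx queue through the PF */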
static int
i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
                                bool on)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_virtchnl_queue_select queue_select;
        int err;
        struct vf_cmd_info args;

        memset(&queue_select, 0, sizeof(queue_select));
        queue_select.vsi_id = vf->vsi_res->vsi_id;

        if (isrx)
                queue_select.rx_queues |= 1 << qid;
        else
                queue_select.tx_queues |= 1 << qid;

        if (on)
                args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
        else
                args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
        args.in_args = (u8 *)&queue_select;
        args.in_args_size = sizeof(queue_select);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
                            isrx ? "RX" : "TX", qid, on ? "on" : "off");

        return err;
}

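/* Start all Rx queues and then all Tx queues, skipping deferred ones */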
static int
i40evf_start_queues(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *dev_data = dev->data;
        int i;
        struct i40e_rx_queue *rxq;
        struct i40e_tx_queue *txq;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev_data->rx_queues[i];
                if (rxq->rx_deferred_start)
                        continue;
                if (i40evf_dev_rx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
                        return -1;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev_data->tx_queues[i];
                if (txq->tx_deferred_start)
                        continue;
                if (i40evf_dev_tx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
                        return -1;
                }
        }

        return 0;
}

static int
i40evf_stop_queues(struct rte_eth_dev *dev)
{
        int i;

        /* Stop TX queues first */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
                        return -1;
                }
        }

        /* Then stop RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
                        return -1;
                }
        }

        return 0;
}

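/* Add a MAC address filter for this VF through the PF */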
static void
i40evf_add_mac_addr(struct rte_eth_dev *dev,
                    struct ether_addr *addr,
                    __rte_unused uint32_t index,
                    __rte_unused uint32_t pool)
{
        struct i40e_virtchnl_ether_addr_list *list;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
                        sizeof(struct i40e_virtchnl_ether_addr)];
        int err;
        struct vf_cmd_info args;

        if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
                            addr->addr_bytes[0], addr->addr_bytes[1],
                            addr->addr_bytes[2], addr->addr_bytes[3],
                            addr->addr_bytes[4], addr->addr_bytes[5]);
                return;
        }

        list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
        list->vsi_id = vf->vsi_res->vsi_id;
        list->num_elements = 1;
        (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
                                        sizeof(addr->addr_bytes));

        args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
        args.in_args = cmd_buffer;
        args.in_args_size = sizeof(cmd_buffer);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command "
                            "OP_ADD_ETHER_ADDRESS");
}

/* Remove the MAC address filter at the given index through the PF */
static void
i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
        struct i40e_virtchnl_ether_addr_list *list;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct rte_eth_dev_data *data = dev->data;
        struct ether_addr *addr;
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
                        sizeof(struct i40e_virtchnl_ether_addr)];
        int err;
        struct vf_cmd_info args;

        addr = &(data->mac_addrs[index]);

        if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
                            addr->addr_bytes[0], addr->addr_bytes[1],
                            addr->addr_bytes[2], addr->addr_bytes[3],
                            addr->addr_bytes[4], addr->addr_bytes[5]);
                return;
        }

        list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
        list->vsi_id = vf->vsi_res->vsi_id;
        list->num_elements = 1;
        (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
                        sizeof(addr->addr_bytes));

        args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
        args.in_args = cmd_buffer;
        args.in_args_size = sizeof(cmd_buffer);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command "
                            "OP_DEL_ETHER_ADDRESS");
}

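/*
 * Fetch the VSI statistics from the PF. On success, *pstats points into
 * the shared command result buffer.
 */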
static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_virtchnl_queue_select q_stats;
        int err;
        struct vf_cmd_info args;

        memset(&q_stats, 0, sizeof(q_stats));
        q_stats.vsi_id = vf->vsi_res->vsi_id;
        args.ops = I40E_VIRTCHNL_OP_GET_STATS;
        args.in_args = (u8 *)&q_stats;
        args.in_args_size = sizeof(q_stats);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;

        err = i40evf_execute_vf_cmd(dev, &args);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
                *pstats = NULL;
                return err;
        }
        *pstats = (struct i40e_eth_stats *)args.out_buffer;
        return 0;
}

static int
i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        int ret;
        struct i40e_eth_stats *pstats = NULL;

        ret = i40evf_update_stats(dev, &pstats);
        if (ret != 0)
                return 0;

        stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
                                                pstats->rx_broadcast;
        stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
                                                pstats->tx_unicast;
        stats->ierrors = pstats->rx_discards;
        stats->oerrors = pstats->tx_errors + pstats->tx_discards;
        stats->ibytes = pstats->rx_bytes;
        stats->obytes = pstats->tx_bytes;

        return 0;
}

static void
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_eth_stats *pstats = NULL;

        /* read stat values to clear hardware registers */
        i40evf_update_stats(dev, &pstats);

        /* set the stats offset base to the current values */
        vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
}

static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
                                 struct rte_eth_xstats *xstats, unsigned n)
{
        int ret;
        unsigned i;
        struct i40e_eth_stats *pstats = NULL;

        if (n < I40EVF_NB_XSTATS)
                return I40EVF_NB_XSTATS;

        ret = i40evf_update_stats(dev, &pstats);
        if (ret != 0)
                return 0;

        if (!xstats)
                return 0;

        /* loop over xstats array and values from pstats */
        for (i = 0; i < I40EVF_NB_XSTATS; i++) {
                snprintf(xstats[i].name, sizeof(xstats[i].name),
                         "%s", rte_i40evf_stats_strings[i].name);
                xstats[i].value = *(uint64_t *)(((char *)pstats) +
                        rte_i40evf_stats_strings[i].offset);
        }

        return I40EVF_NB_XSTATS;
}

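/* Add a VLAN filter for this VF through the PF */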
static int
i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_virtchnl_vlan_filter_list *vlan_list;
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
                                                        sizeof(uint16_t)];
        int err;
        struct vf_cmd_info args;

        vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
        vlan_list->vsi_id = vf->vsi_res->vsi_id;
        vlan_list->num_elements = 1;
        vlan_list->vlan_id[0] = vlanid;

        args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
        args.in_args = (u8 *)&cmd_buffer;
        args.in_args_size = sizeof(cmd_buffer);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");

        return err;
}

/* Remove a VLAN filter for this VF through the PF */
static int
i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_virtchnl_vlan_filter_list *vlan_list;
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
                                                        sizeof(uint16_t)];
        int err;
        struct vf_cmd_info args;

        vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
        vlan_list->vsi_id = vf->vsi_res->vsi_id;
        vlan_list->num_elements = 1;
        vlan_list->vlan_id[0] = vlanid;

        args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
        args.in_args = (u8 *)&cmd_buffer;
        args.in_args_size = sizeof(cmd_buffer);
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        err = i40evf_execute_vf_cmd(dev, &args);
        if (err)
                PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");

        return err;
}

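/* Query the link status from the PF host */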
static int
i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
{
        int err;
        struct vf_cmd_info args;
        struct rte_eth_link *new_link;

        args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
        args.in_args = NULL;
        args.in_args_size = 0;
        args.out_buffer = cmd_result_buffer;
        args.out_size = I40E_AQ_BUF_SZ;
        err = i40evf_execute_vf_cmd(dev, &args);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
                return err;
        }

        new_link = (struct rte_eth_link *)args.out_buffer;
        (void)rte_memcpy(link, new_link, sizeof(*link));

        return 0;
}

static const struct rte_pci_id pci_id_i40evf_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};

static inline int
i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                    struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

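/* Trigger a VF reset and poll I40E_VFGEN_RSTAT until the reset completes */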
static int
i40evf_reset_vf(struct i40e_hw *hw)
{
        int i, reset;

        if (i40e_vf_reset(hw) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Reset VF NIC failed");
                return -1;
        }
        /**
         * After the VF issues a reset command to the PF, the PF won't
         * necessarily reset the VF; it depends on the PF's current state.
         * If the PF is not initialized yet, it skips the VF reset. When
         * the VF is reset, the PF sets I40E_VFGEN_RSTAT to COMPLETE first,
         * then waits 10ms and sets it to ACTIVE. The VF may miss the moment
         * that COMPLETE is set, so wait a long time here.
         */
        rte_delay_ms(200);

        for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
                reset = rd32(hw, I40E_VFGEN_RSTAT) &
                        I40E_VFGEN_RSTAT_VFR_STATE_MASK;
                reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
                if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
                        break;
                else
                        rte_delay_ms(50);
        }

        if (i >= MAX_RESET_WAIT_CNT) {
                PMD_INIT_LOG(ERR, "Reset VF NIC failed");
                return -1;
        }

        return 0;
}

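/*
 * VF initialization: set the MAC type, bring up the admin queue, reset
 * the VF, negotiate the API version, fetch the VF resources, pick the
 * LAN VSI, and store the host-configured MAC address (or generate one).
 */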
static int
i40evf_init_vf(struct rte_eth_dev *dev)
{
        int i, err, bufsz;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct ether_addr *p_mac_addr;

        vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        vf->dev_data = dev->data;
        err = i40e_set_mac_type(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
                goto err;
        }

        i40e_init_adminq_parameter(hw);
        err = i40e_init_adminq(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
                goto err;
        }

        /* Reset VF and wait until it's complete */
        if (i40evf_reset_vf(hw)) {
                PMD_INIT_LOG(ERR, "reset NIC failed");
                goto err_aq;
        }

        /* VF reset, shutdown admin queue and initialize again */
        if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
                return -1;
        }

        i40e_init_adminq_parameter(hw);
        if (i40e_init_adminq(hw) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "init_adminq failed");
                return -1;
        }
        if (i40evf_check_api_version(dev) != 0) {
                PMD_INIT_LOG(ERR, "check_api version failed");
                goto err_aq;
        }
        bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
        vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
        if (!vf->vf_res) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
                goto err_aq;
        }

        if (i40evf_get_vf_resource(dev) != 0) {
                PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
                goto err_alloc;
        }

        /* got VF config message back from PF, now we can parse it */
        for (i = 0; i < vf->vf_res->num_vsis; i++) {
                if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        vf->vsi_res = &vf->vf_res->vsi_res[i];
        }

        if (!vf->vsi_res) {
                PMD_INIT_LOG(ERR, "no LAN VSI found");
                goto err_alloc;
        }

        if (hw->mac.type == I40E_MAC_X722_VF)
                vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
        vf->vsi.vsi_id = vf->vsi_res->vsi_id;
        vf->vsi.type = vf->vsi_res->vsi_type;
        vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
        vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

        /* Store the MAC address configured by host, or generate random one */
        p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr);
        if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */
                ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr);
        else
                eth_random_addr(hw->mac.addr); /* Generate a random one */

        return 0;

err_alloc:
        rte_free(vf->vf_res);
err_aq:
        i40e_shutdown_adminq(hw); /* ignore error */
err:
        return -1;
}

static int
i40evf_uninit_vf(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (hw->adapter_stopped == 0)
                i40evf_dev_close(dev);
        rte_free(vf->vf_res);
        vf->vf_res = NULL;

        return 0;
}

static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
                        eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        /* assign ops func pointer */
        eth_dev->dev_ops = &i40evf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &i40e_recv_pkts;
        eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                i40e_set_rx_function(eth_dev);
                i40e_set_tx_function(eth_dev);
                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);

        hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
        hw->device_id = eth_dev->pci_dev->id.device_id;
        hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
        hw->bus.device = eth_dev->pci_dev->addr.devid;
        hw->bus.func = eth_dev->pci_dev->addr.function;
        hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
        hw->adapter_stopped = 0;

        if (i40evf_init_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Init vf failed");
                return -1;
        }

        /* allocate memory for mac addr storage */
        eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
                                        ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
                                        0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
                                " store MAC addresses",
                                ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
                return -ENOMEM;
        }
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        return 0;
}

static int
i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        if (i40evf_uninit_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
                return -1;
        }

        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        return 0;
}

/*
 * virtual function driver struct
 */
static struct eth_driver rte_i40evf_pmd = {
        .pci_drv = {
                .name = "rte_i40evf_pmd",
                .id_table = pci_id_i40evf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = i40evf_dev_init,
        .eth_dev_uninit = i40evf_dev_uninit,
        .dev_private_size = sizeof(struct i40e_adapter),
};

/*
 * VF driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Virtual Poll Mode] Driver for PCI Fortville devices.
 */
static int
rte_i40evf_pmd_init(const char *name __rte_unused,
                    const char *params __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        rte_eth_driver_register(&rte_i40evf_pmd);

        return 0;
}

static struct rte_driver rte_i40evf_driver = {
        .type = PMD_PDEV,
        .init = rte_i40evf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40evf_driver);

static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

        /* Initialize to TRUE. If any of the Rx queues doesn't meet the
         * bulk allocation or vector Rx preconditions, it will be reset.
         */
        ad->rx_bulk_alloc_allowed = true;
        ad->rx_vec_allowed = true;
        ad->tx_simple_allowed = true;
        ad->tx_vec_allowed = true;

        return i40evf_init_vlan(dev);
}

static int
i40evf_init_vlan(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *data = dev->data;
        int ret;

        /* Apply vlan offload setting */
        i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);

        /* Apply pvid setting */
        ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
                                data->dev_conf.txmode.hw_vlan_insert_pvid);
        return ret;
}

static void
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        bool enable_vlan_strip = 0;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        /* The Linux PF host doesn't support vlan offload yet */
        if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
                /* Vlan stripping setting */
                if (mask & ETH_VLAN_STRIP_MASK) {
                        /* Enable or disable VLAN stripping */
                        if (dev_conf->rxmode.hw_vlan_strip)
                                enable_vlan_strip = 1;
                        else
                                enable_vlan_strip = 0;

                        i40evf_config_vlan_offload(dev, enable_vlan_strip);
                }
        }
}

static int
i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct i40e_vsi_vlan_pvid_info info;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        memset(&info, 0, sizeof(info));
        info.on = on;

        /* The Linux PF host doesn't support vlan offload yet */
        if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
                if (info.on)
                        info.config.pvid = pvid;
                else {
                        info.config.reject.tagged =
                                dev_conf->txmode.hw_vlan_reject_tagged;
                        info.config.reject.untagged =
                                dev_conf->txmode.hw_vlan_reject_untagged;
                }
                return i40evf_config_vlan_pvid(dev, &info);
        }

        return 0;
}

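/*
 * Start an Rx queue: fill it with mbufs, program the tail register and
 * ask the PF over virtchnl to switch the queue on.
 */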
static int
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct i40e_rx_queue *rxq;
        int err = 0;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];

                err = i40e_alloc_rx_queue_mbufs(rxq);
                if (err) {
                        PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
                        return err;
                }

                rte_wmb();

                /* Init the RX tail register. */
                I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
                I40EVF_WRITE_FLUSH(hw);

                /* Ready to switch the queue on */
                err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);

                if (err)
                        PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
                                    rx_queue_id);
                else
                        dev->data->rx_queue_state[rx_queue_id] =
                                RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct i40e_rx_queue *rxq;
        int err;

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];

                err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);

                if (err) {
                        PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
                                    rx_queue_id);
                        return err;
                }

                i40e_rx_queue_release_mbufs(rxq);
                i40e_reset_rx_queue(rxq);
                dev->data->rx_queue_state[rx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                /* Ready to switch the queue on */
                err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);

                if (err)
                        PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
                                    tx_queue_id);
                else
                        dev->data->tx_queue_state[tx_queue_id] =
                                RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct i40e_tx_queue *txq;
        int err;

        if (tx_queue_id < dev->data->nb_tx_queues) {
                txq = dev->data->tx_queues[tx_queue_id];

                err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);

                if (err) {
                        PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
                                    tx_queue_id);
                        return err;
                }

                i40e_tx_queue_release_mbufs(txq);
                i40e_reset_tx_queue(txq);
                dev->data->tx_queue_state[tx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        int ret;

        if (on)
                ret = i40evf_add_vlan(dev, vlan_id);
        else
                ret = i40evf_del_vlan(dev, vlan_id);

        return ret;
}

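/*
 * Per-queue Rx setup: program the tail register, derive the buffer and
 * maximum packet lengths from the mempool and device configuration, and
 * validate them against the jumbo frame setting.
 */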
static int
i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_dev_data *dev_data = dev->data;
        struct rte_pktmbuf_pool_private *mbp_priv;
        uint16_t buf_size, len;

        rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
        I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        I40EVF_WRITE_FLUSH(hw);

        /* Calculate the maximum packet length allowed */
        mbp_priv = rte_mempool_get_priv(rxq->mp);
        buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
                                        RTE_PKTMBUF_HEADROOM);
        rxq->hs_mode = i40e_header_split_none;
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
        len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
        rxq->max_pkt_len = RTE_MIN(len,
                dev_data->dev_conf.rxmode.max_rx_pkt_len);

        /**
         * Check if the jumbo frame and maximum packet length are set
         * correctly.
         */
        if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
                if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
                    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                "larger than %u and smaller than %u, as jumbo "
                                "frame is enabled", (uint32_t)ETHER_MAX_LEN,
                                        (uint32_t)I40E_FRAME_SIZE_MAX);
                        return I40E_ERR_CONFIG;
                }
        } else {
                if (rxq->max_pkt_len < ETHER_MIN_LEN ||
                    rxq->max_pkt_len > ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                "larger than %u and smaller than %u, as jumbo "
                                "frame is disabled", (uint32_t)ETHER_MIN_LEN,
                                                (uint32_t)ETHER_MAX_LEN);
                        return I40E_ERR_CONFIG;
                }
        }

        if (dev_data->dev_conf.rxmode.enable_scatter ||
            (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
                dev_data->scattered_rx = 1;
        }

        return 0;
}

static int
i40evf_rx_init(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        uint16_t i;
        int ret = I40E_SUCCESS;
        struct i40e_rx_queue **rxq =
                (struct i40e_rx_queue **)dev->data->rx_queues;

        i40evf_config_rss(vf);
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!rxq[i] || !rxq[i]->q_set)
                        continue;
                ret = i40evf_rxq_init(dev, rxq[i]);
                if (ret != I40E_SUCCESS)
                        break;
        }
        if (ret == I40E_SUCCESS)
                i40e_set_rx_function(dev);

        return ret;
}

static void
i40evf_tx_init(struct rte_eth_dev *dev)
{
        uint16_t i;
        struct i40e_tx_queue **txq =
                (struct i40e_tx_queue **)dev->data->tx_queues;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        for (i = 0; i < dev->data->nb_tx_queues; i++)
                txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);

        i40e_set_tx_function(dev);
}

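/*
 * Enable the queue interrupt cause. When no MSI-X vector beyond the
 * miscellaneous one is available only VFINT_DYN_CTL01 can be used;
 * otherwise the register written depends on whether the PF host runs
 * the DPDK or the Linux driver.
 */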
static inline void
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

        if (!rte_intr_allow_others(intr_handle)) {
                I40E_WRITE_REG(hw,
                               I40E_VFINT_DYN_CTL01,
                               I40E_VFINT_DYN_CTL01_INTENA_MASK |
                               I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);
                I40EVF_WRITE_FLUSH(hw);
                return;
        }

        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                /* To support DPDK PF host */
                I40E_WRITE_REG(hw,
                        I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
                        I40E_VFINT_DYN_CTLN1_INTENA_MASK |
                        I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
        else
                /* To support Linux PF host */
                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
                                I40E_VFINT_DYN_CTL01_INTENA_MASK |
                                I40E_VFINT_DYN_CTL01_CLEARPBA_MASK);

        I40EVF_WRITE_FLUSH(hw);
}

static inline void
i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

        if (!rte_intr_allow_others(intr_handle)) {
                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
                I40EVF_WRITE_FLUSH(hw);
                return;
        }

        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                I40E_WRITE_REG(hw,
                               I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
                                                    - 1),
                               0);
        else
                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);

        I40EVF_WRITE_FLUSH(hw);
}

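/*
 * Re-arm the Rx interrupt for one queue: write the dynamic control
 * register mapped to the queue's MSI-X vector with the configured ITR
 * interval.
 */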
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t interval =
                i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
        uint16_t msix_intr;

        msix_intr = intr_handle->intr_vec[queue_id];
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
                               I40E_VFINT_DYN_CTL01_INTENA_MASK |
                               I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
                               (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                               (interval <<
                                I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
        else
                I40E_WRITE_REG(hw,
                               I40E_VFINT_DYN_CTLN1(msix_intr -
                                                    I40E_RX_VEC_START),
                               I40E_VFINT_DYN_CTLN1_INTENA_MASK |
                               I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
                               (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
                               (interval <<
                                I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));

        I40EVF_WRITE_FLUSH(hw);

        rte_intr_enable(&dev->pci_dev->intr_handle);

        return 0;
}

static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;

        msix_intr = intr_handle->intr_vec[queue_id];
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
        else
                I40E_WRITE_REG(hw,
                               I40E_VFINT_DYN_CTLN1(msix_intr -
                                                    I40E_RX_VEC_START),
                               0);

        I40EVF_WRITE_FLUSH(hw);

        return 0;
}

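/*
 * Add or remove every non-zero MAC address of the port on the PF side.
 * Addresses are batched so that each virtchnl message stays below
 * I40E_AQ_BUF_SZ.
 */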
static void
i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
{
        struct i40e_virtchnl_ether_addr_list *list;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int err, i, j;
        int next_begin = 0;
        int begin = 0;
        uint32_t len;
        struct ether_addr *addr;
        struct vf_cmd_info args;

        do {
                j = 0;
                len = sizeof(struct i40e_virtchnl_ether_addr_list);
                for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
                        if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
                                continue;
                        len += sizeof(struct i40e_virtchnl_ether_addr);
                        if (len >= I40E_AQ_BUF_SZ) {
                                next_begin = i + 1;
                                break;
                        }
                }

                list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
                if (!list) {
                        PMD_DRV_LOG(ERR, "fail to allocate memory");
                        return;
                }

                for (i = begin; i < next_begin; i++) {
                        addr = &dev->data->mac_addrs[i];
                        if (is_zero_ether_addr(addr))
                                continue;
                        (void)rte_memcpy(list->list[j].addr, addr->addr_bytes,
                                         sizeof(addr->addr_bytes));
                        PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
                                    addr->addr_bytes[0], addr->addr_bytes[1],
                                    addr->addr_bytes[2], addr->addr_bytes[3],
                                    addr->addr_bytes[4], addr->addr_bytes[5]);
                        j++;
                }
                list->vsi_id = vf->vsi_res->vsi_id;
                list->num_elements = j;
                args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS :
                           I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
                args.in_args = (uint8_t *)list;
                args.in_args_size = len;
                args.out_buffer = cmd_result_buffer;
                args.out_size = I40E_AQ_BUF_SZ;
                err = i40evf_execute_vf_cmd(dev, &args);
                if (err)
                        PMD_DRV_LOG(ERR, "fail to execute command %s",
                                    add ? "OP_ADD_ETHER_ADDRESS" :
                                    "OP_DEL_ETHER_ADDRESS");
                rte_free(list);
                begin = next_begin;
        } while (begin < I40E_NUM_MACADDR_MAX);
}

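/*
 * Device start: set up the Rx/Tx queues and the interrupt-vector mapping
 * locally, then configure the queues, the IRQ map and the MAC addresses
 * on the PF, and finally switch the queues on.
 */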
static int
i40evf_dev_start(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        uint32_t intr_vector = 0;

        PMD_INIT_FUNC_TRACE();

        hw->adapter_stopped = 0;

        vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
        vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues);

        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                     " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        if (i40evf_rx_init(dev) != 0) {
                PMD_DRV_LOG(ERR, "failed to do RX init");
                return -1;
        }

        i40evf_tx_init(dev);

        if (i40evf_configure_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "configure queues failed");
                goto err_queue;
        }
        if (i40evf_config_irq_map(dev)) {
                PMD_DRV_LOG(ERR, "config_irq_map failed");
                goto err_queue;
        }

        /* Set all mac addrs */
        i40evf_add_del_all_mac_addr(dev, TRUE);

        if (i40evf_start_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "enable queues failed");
                goto err_mac;
        }

        /* The VF doesn't allow interrupts other than the Rx queue ones */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
                rte_intr_enable(intr_handle);

        i40evf_enable_queues_intr(dev);
        return 0;

err_mac:
        i40evf_add_del_all_mac_addr(dev, FALSE);
err_queue:
        return -1;
}

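/*
 * Device stop: switch the queues off, disable their interrupts and undo
 * the MAC address and interrupt-vector setup done in i40evf_dev_start().
 */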
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        i40evf_stop_queues(dev);
        i40evf_disable_queues_intr(dev);
        i40e_dev_clear_queues(dev);

        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }
        /* remove all mac addrs */
        i40evf_add_del_all_mac_addr(dev, FALSE);
}

static int
i40evf_dev_link_update(struct rte_eth_dev *dev,
                       __rte_unused int wait_to_complete)
{
        struct rte_eth_link new_link;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        /*
         * The DPDK PF host provides an interface to acquire the link
         * status, while the Linux driver does not.
         */
        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                i40evf_get_link_status(dev, &new_link);
        else {
                /* Always assume it's up, for Linux driver PF host */
                new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
                new_link.link_speed  = ETH_LINK_SPEED_10000;
                new_link.link_status = 1;
        }
        i40evf_dev_atomic_write_link_status(dev, &new_link);

        return 0;
}

static void
i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int ret;

        /* If enabled, just return */
        if (vf->promisc_unicast_enabled)
                return;

        ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
        if (ret == 0)
                vf->promisc_unicast_enabled = TRUE;
}

static void
i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int ret;

        /* If disabled, just return */
        if (!vf->promisc_unicast_enabled)
                return;

        ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
        if (ret == 0)
                vf->promisc_unicast_enabled = FALSE;
}

static void
i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int ret;

        /* If enabled, just return */
        if (vf->promisc_multicast_enabled)
                return;

        ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
        if (ret == 0)
                vf->promisc_multicast_enabled = TRUE;
}

static void
i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        int ret;

        /* If disabled, just return */
        if (!vf->promisc_multicast_enabled)
                return;

        ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
        if (ret == 0)
                vf->promisc_multicast_enabled = FALSE;
}

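/*
 * Report the VF capabilities: queue counts from the VSI resources, the
 * supported offloads and the default queue/descriptor parameters.
 */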
static void
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        memset(dev_info, 0, sizeof(*dev_info));
        dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
        dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
                sizeof(uint32_t);
        dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
        dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
        dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_QINQ_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = I40E_DEFAULT_RX_PTHRESH,
                        .hthresh = I40E_DEFAULT_RX_HTHRESH,
                        .wthresh = I40E_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = I40E_DEFAULT_TX_PTHRESH,
                        .hthresh = I40E_DEFAULT_TX_HTHRESH,
                        .wthresh = I40E_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = I40E_MAX_RING_DESC,
                .nb_min = I40E_MIN_RING_DESC,
                .nb_align = I40E_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = I40E_MAX_RING_DESC,
                .nb_min = I40E_MIN_RING_DESC,
                .nb_align = I40E_ALIGN_RING_DESC,
        };
}

static void
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        if (i40evf_get_statics(dev, stats))
                PMD_DRV_LOG(ERR, "Get statistics failed");
}

static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        i40evf_dev_stop(dev);
        hw->adapter_stopped = 1;
        i40e_dev_free_queues(dev);
        i40evf_reset_vf(hw);
        i40e_shutdown_adminq(hw);
}

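/*
 * Read the RSS lookup table, either through the admin queue when the PF
 * exposes that capability or directly from the VFQF_HLUT registers.
 */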
static int
i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
        struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        int ret;

        if (!lut)
                return -EINVAL;

        if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
                ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
                                          lut, lut_size);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
                        return ret;
                }
        } else {
                uint32_t *lut_dw = (uint32_t *)lut;
                uint16_t i, lut_size_dw = lut_size / 4;

                for (i = 0; i < lut_size_dw; i++)
                        lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
        }

        return 0;
}

static int
i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
        struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        int ret;

        if (!vsi || !lut)
                return -EINVAL;

        if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
                ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
                                          lut, lut_size);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
                        return ret;
                }
        } else {
                uint32_t *lut_dw = (uint32_t *)lut;
                uint16_t i, lut_size_dw = lut_size / 4;

                for (i = 0; i < lut_size_dw; i++)
                        I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
                I40EVF_WRITE_FLUSH(hw);
        }

        return 0;
}

static int
i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
                           struct rte_eth_rss_reta_entry64 *reta_conf,
                           uint16_t reta_size)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        uint8_t *lut;
        uint16_t i, idx, shift;
        int ret;

        if (reta_size != ETH_RSS_RETA_SIZE_64) {
                PMD_DRV_LOG(ERR, "The size of the hash lookup table "
                        "configured (%d) doesn't match the number the "
                        "hardware can support (%d)", reta_size,
                        ETH_RSS_RETA_SIZE_64);
                return -EINVAL;
        }

        lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
        if (!lut) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }
        ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
        if (ret)
                goto out;
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        lut[i] = reta_conf[idx].reta[shift];
        }
        ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);

out:
        rte_free(lut);

        return ret;
}

static int
i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
                          struct rte_eth_rss_reta_entry64 *reta_conf,
                          uint16_t reta_size)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        uint16_t i, idx, shift;
        uint8_t *lut;
        int ret;

        if (reta_size != ETH_RSS_RETA_SIZE_64) {
                PMD_DRV_LOG(ERR, "The size of the hash lookup table "
                        "configured (%d) doesn't match the number the "
                        "hardware can support (%d)", reta_size,
                        ETH_RSS_RETA_SIZE_64);
                return -EINVAL;
        }

        lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
        if (!lut) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
        if (ret)
                goto out;
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = lut[i];
        }

out:
        rte_free(lut);

        return ret;
}

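/*
 * Program the RSS hash key, either through the admin queue when the PF
 * exposes that capability or directly into the VFQF_HKEY registers. The
 * key length must match the size these registers provide.
 */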
static int
i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
        struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        int ret = 0;

        if (!key || key_len == 0) {
                PMD_DRV_LOG(DEBUG, "No key to be configured");
                return 0;
        } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
                sizeof(uint32_t)) {
                PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
                return -EINVAL;
        }

        if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
                struct i40e_aqc_get_set_rss_key_data *key_dw =
                        (struct i40e_aqc_get_set_rss_key_data *)key;

                ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
                if (ret)
                        PMD_INIT_LOG(ERR, "Failed to configure RSS key "
                                     "via AQ");
        } else {
                uint32_t *hash_key = (uint32_t *)key;
                uint16_t i;

                for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
                        i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
                I40EVF_WRITE_FLUSH(hw);
        }

        return ret;
}

static int
i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
        struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        int ret;

        if (!key || !key_len)
                return -EINVAL;

        if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
                ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
                        (struct i40e_aqc_get_set_rss_key_data *)key);
                if (ret) {
                        PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
                        return ret;
                }
        } else {
                uint32_t *key_dw = (uint32_t *)key;
                uint16_t i;

                for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
                        key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
        }
        *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

        return 0;
}

static int
i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
{
        struct i40e_hw *hw = I40E_VF_TO_HW(vf);
        uint64_t rss_hf, hena;
        int ret;

        ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
                                 rss_conf->rss_key_len);
        if (ret)
                return ret;

        rss_hf = rss_conf->rss_hf;
        hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
        hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
        hena &= ~I40E_RSS_HENA_ALL;
        hena |= i40e_config_hena(rss_hf);
        i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
        i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
        I40EVF_WRITE_FLUSH(hw);

        return 0;
}

static void
i40evf_disable_rss(struct i40e_vf *vf)
{
        struct i40e_hw *hw = I40E_VF_TO_HW(vf);
        uint64_t hena;

        hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
        hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
        hena &= ~I40E_RSS_HENA_ALL;
        i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
        i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
        I40EVF_WRITE_FLUSH(hw);
}

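/*
 * Configure RSS through the VF registers: fill the lookup table with a
 * round-robin mapping of the enabled Rx queues, then program the hash
 * configuration, generating a random key when none (or a too short one)
 * is supplied. RSS is disabled entirely when the mq_mode or the hash
 * flags do not request it.
 */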
static int
i40evf_config_rss(struct i40e_vf *vf)
{
        struct i40e_hw *hw = I40E_VF_TO_HW(vf);
        struct rte_eth_rss_conf rss_conf;
        uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
        uint16_t num;

        if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
                i40evf_disable_rss(vf);
                PMD_DRV_LOG(DEBUG, "RSS not configured");
                return 0;
        }

        num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
        /* Fill out the look up table */
        for (i = 0, j = 0; i < nb_q; i++, j++) {
                if (j >= num)
                        j = 0;
                lut = (lut << 8) | j;
                if ((i & 3) == 3)
                        I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
        }

        rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
        if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
                i40evf_disable_rss(vf);
                PMD_DRV_LOG(DEBUG, "No hash flag is set");
                return 0;
        }

        if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
                (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
                /* Generate a random default hash key */
                for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
                        rss_key_default[i] = (uint32_t)rte_rand();
                rss_conf.rss_key = (uint8_t *)rss_key_default;
                rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
                        sizeof(uint32_t);
        }

        return i40evf_hw_rss_hash_set(vf, &rss_conf);
}

static int
i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
                           struct rte_eth_rss_conf *rss_conf)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
        uint64_t hena;

        hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
        hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
        if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
                if (rss_hf != 0) /* Enable RSS */
                        return -EINVAL;
                return 0;
        }

        /* RSS enabled */
        if (rss_hf == 0) /* Disable RSS */
                return -EINVAL;

        return i40evf_hw_rss_hash_set(vf, rss_conf);
}

static int
i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                             struct rte_eth_rss_conf *rss_conf)
{
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t hena;

        i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
                           &rss_conf->rss_key_len);

        hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
        hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
        rss_conf->rss_hf = i40e_parse_hena(hena);

        return 0;
}