net/i40e: fix multi-queue Rx interrupt for VF
drivers/net/i40e/i40e_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cycles.h>
16
17 #include <rte_interrupts.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_pci.h>
21 #include <rte_bus_pci.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_eal.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_ethdev_pci.h>
30 #include <rte_malloc.h>
31 #include <rte_dev.h>
32
33 #include "i40e_logs.h"
34 #include "base/i40e_prototype.h"
35 #include "base/i40e_adminq_cmd.h"
36 #include "base/i40e_type.h"
37
38 #include "i40e_rxtx.h"
39 #include "i40e_ethdev.h"
40 #include "i40e_pf.h"
41
42 /* busy wait delay in msec */
43 #define I40EVF_BUSY_WAIT_DELAY 10
44 #define I40EVF_BUSY_WAIT_COUNT 50
45 #define MAX_RESET_WAIT_CNT     20
46
47 #define I40EVF_ALARM_INTERVAL 50000 /* us */
48
49 struct i40evf_arq_msg_info {
50         enum virtchnl_ops ops;
51         enum i40e_status_code result;
52         uint16_t buf_len;
53         uint16_t msg_len;
54         uint8_t *msg;
55 };
56
57 struct vf_cmd_info {
58         enum virtchnl_ops ops;
59         uint8_t *in_args;
60         uint32_t in_args_size;
61         uint8_t *out_buffer;
62         /* Input and output: pass in the buffer size, pass out the
63          * actual length of the returned result
64          */
65         uint32_t out_size;
66 };
67
68 enum i40evf_aq_result {
69         I40EVF_MSG_ERR = -1, /* Error encountered when accessing admin queue */
70         I40EVF_MSG_NON,      /* Read nothing from admin queue */
71         I40EVF_MSG_SYS,      /* Read system msg from admin queue */
72         I40EVF_MSG_CMD,      /* Read async command result */
73 };
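/* How these results drive the synchronous polling further below (a
 * summary of i40evf_read_pfmsg()/i40evf_execute_vf_cmd(), not new
 * behavior):
 *   I40EVF_MSG_NON - nothing read yet, keep polling
 *   I40EVF_MSG_SYS - a PF system event (link/reset/close) was consumed
 *   I40EVF_MSG_CMD - the reply to the pending command arrived
 *   I40EVF_MSG_ERR - admin queue access failed, abort the wait
 */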
74
75 static int i40evf_dev_configure(struct rte_eth_dev *dev);
76 static int i40evf_dev_start(struct rte_eth_dev *dev);
77 static void i40evf_dev_stop(struct rte_eth_dev *dev);
78 static int i40evf_dev_info_get(struct rte_eth_dev *dev,
79                                struct rte_eth_dev_info *dev_info);
80 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
81                                   int wait_to_complete);
82 static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
83                                 struct rte_eth_stats *stats);
84 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
85                                  struct rte_eth_xstat *xstats, unsigned n);
86 static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
87                                        struct rte_eth_xstat_name *xstats_names,
88                                        unsigned limit);
89 static int i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
90 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
91                                   uint16_t vlan_id, int on);
92 static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
93 static void i40evf_dev_close(struct rte_eth_dev *dev);
94 static int  i40evf_dev_reset(struct rte_eth_dev *dev);
95 static int i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
96 static int i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
97 static int i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
98 static int i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
99 static int i40evf_init_vlan(struct rte_eth_dev *dev);
100 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
101                                      uint16_t rx_queue_id);
102 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
103                                     uint16_t rx_queue_id);
104 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
105                                      uint16_t tx_queue_id);
106 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
107                                     uint16_t tx_queue_id);
108 static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
109                                struct rte_ether_addr *addr,
110                                uint32_t index,
111                                uint32_t pool);
112 static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
113 static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
114                         struct rte_eth_rss_reta_entry64 *reta_conf,
115                         uint16_t reta_size);
116 static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
117                         struct rte_eth_rss_reta_entry64 *reta_conf,
118                         uint16_t reta_size);
119 static int i40evf_config_rss(struct i40e_vf *vf);
120 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
121                                       struct rte_eth_rss_conf *rss_conf);
122 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
123                                         struct rte_eth_rss_conf *rss_conf);
124 static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
125 static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
126                                         struct rte_ether_addr *mac_addr);
127 static int
128 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
129 static int
130 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
131 static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
132                                    uint8_t *msg,
133                                    uint16_t msglen);
134
135 static int
136 i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
137                         struct rte_ether_addr *mc_addr_set,
138                         uint32_t nb_mc_addr, bool add);
139 static int
140 i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
141                         struct rte_ether_addr *mc_addr_set,
142                         uint32_t nb_mc_addr);
143 static void
144 i40evf_dev_alarm_handler(void *param);
145
146 /* Default hash key buffer for RSS */
147 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
148
149 struct rte_i40evf_xstats_name_off {
150         char name[RTE_ETH_XSTATS_NAME_SIZE];
151         unsigned offset;
152 };
153
154 static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
155         {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
156         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
157         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
158         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
159         {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
160         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
161                 rx_unknown_protocol)},
162         {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
163         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
164         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
165         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
166         {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
167         {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
168 };
169
170 #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
171                 sizeof(rte_i40evf_stats_strings[0]))
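/* Each entry above pairs an xstat name with its offsetof() into
 * struct i40e_eth_stats; i40evf_dev_xstats_get() further below reads
 * the value generically as *(uint64_t *)((char *)stats + offset). */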
172
173 static const struct eth_dev_ops i40evf_eth_dev_ops = {
174         .dev_configure        = i40evf_dev_configure,
175         .dev_start            = i40evf_dev_start,
176         .dev_stop             = i40evf_dev_stop,
177         .promiscuous_enable   = i40evf_dev_promiscuous_enable,
178         .promiscuous_disable  = i40evf_dev_promiscuous_disable,
179         .allmulticast_enable  = i40evf_dev_allmulticast_enable,
180         .allmulticast_disable = i40evf_dev_allmulticast_disable,
181         .link_update          = i40evf_dev_link_update,
182         .stats_get            = i40evf_dev_stats_get,
183         .stats_reset          = i40evf_dev_xstats_reset,
184         .xstats_get           = i40evf_dev_xstats_get,
185         .xstats_get_names     = i40evf_dev_xstats_get_names,
186         .xstats_reset         = i40evf_dev_xstats_reset,
187         .dev_close            = i40evf_dev_close,
188         .dev_reset            = i40evf_dev_reset,
189         .dev_infos_get        = i40evf_dev_info_get,
190         .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
191         .vlan_filter_set      = i40evf_vlan_filter_set,
192         .vlan_offload_set     = i40evf_vlan_offload_set,
193         .rx_queue_start       = i40evf_dev_rx_queue_start,
194         .rx_queue_stop        = i40evf_dev_rx_queue_stop,
195         .tx_queue_start       = i40evf_dev_tx_queue_start,
196         .tx_queue_stop        = i40evf_dev_tx_queue_stop,
197         .rx_queue_setup       = i40e_dev_rx_queue_setup,
198         .rx_queue_release     = i40e_dev_rx_queue_release,
199         .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
200         .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
201         .rx_descriptor_done   = i40e_dev_rx_descriptor_done,
202         .rx_descriptor_status = i40e_dev_rx_descriptor_status,
203         .tx_descriptor_status = i40e_dev_tx_descriptor_status,
204         .tx_queue_setup       = i40e_dev_tx_queue_setup,
205         .tx_queue_release     = i40e_dev_tx_queue_release,
206         .rx_queue_count       = i40e_dev_rx_queue_count,
207         .rxq_info_get         = i40e_rxq_info_get,
208         .txq_info_get         = i40e_txq_info_get,
209         .mac_addr_add         = i40evf_add_mac_addr,
210         .mac_addr_remove      = i40evf_del_mac_addr,
211         .set_mc_addr_list     = i40evf_set_mc_addr_list,
212         .reta_update          = i40evf_dev_rss_reta_update,
213         .reta_query           = i40evf_dev_rss_reta_query,
214         .rss_hash_update      = i40evf_dev_rss_hash_update,
215         .rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
216         .mtu_set              = i40evf_dev_mtu_set,
217         .mac_addr_set         = i40evf_set_default_mac_addr,
218         .tx_done_cleanup      = i40e_tx_done_cleanup,
219 };
220
221 /*
222  * Read the admin queue to get a message from the PF driver
223  */
224 static enum i40evf_aq_result
225 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
226 {
227         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
228         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
229         struct i40e_arq_event_info event;
230         enum virtchnl_ops opcode;
231         enum i40e_status_code retval;
232         int ret;
233         enum i40evf_aq_result result = I40EVF_MSG_NON;
234
235         event.buf_len = data->buf_len;
236         event.msg_buf = data->msg;
237         ret = i40e_clean_arq_element(hw, &event, NULL);
238         /* Can't read any msg from adminQ */
239         if (ret) {
240                 if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
241                         result = I40EVF_MSG_ERR;
242                 return result;
243         }
244
245         opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
246         retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
247         /* pf sys event */
248         if (opcode == VIRTCHNL_OP_EVENT) {
249                 struct virtchnl_pf_event *vpe =
250                         (struct virtchnl_pf_event *)event.msg_buf;
251
252                 result = I40EVF_MSG_SYS;
253                 switch (vpe->event) {
254                 case VIRTCHNL_EVENT_LINK_CHANGE:
255                         vf->link_up =
256                                 vpe->event_data.link_event.link_status;
257                         vf->link_speed =
258                                 vpe->event_data.link_event.link_speed;
259                         vf->pend_msg |= PFMSG_LINK_CHANGE;
260                         PMD_DRV_LOG(INFO, "Link status update:%s",
261                                     vf->link_up ? "up" : "down");
262                         break;
263                 case VIRTCHNL_EVENT_RESET_IMPENDING:
264                         vf->vf_reset = true;
265                         vf->pend_msg |= PFMSG_RESET_IMPENDING;
266                         PMD_DRV_LOG(INFO, "VF is resetting");
267                         break;
268                 case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
269                         vf->dev_closed = true;
270                         vf->pend_msg |= PFMSG_DRIVER_CLOSE;
271                         PMD_DRV_LOG(INFO, "PF driver closed");
272                         break;
273                 default:
274                         PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
275                                     __func__, vpe->event);
276                 }
277         } else {
278                 /* async reply msg on command issued by vf previously */
279                 result = I40EVF_MSG_CMD;
280                 /* Actual data length read from PF */
281                 data->msg_len = event.msg_len;
282         }
283
284         data->result = retval;
285         data->ops = opcode;
286
287         return result;
288 }
289
290 /**
291  * Clear the current pending command. Only call this after
292  * _atomic_set_cmd() has succeeded.
293  */
294 static inline void
295 _clear_cmd(struct i40e_vf *vf)
296 {
297         rte_wmb();
298         vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
299 }
300
301 /*
302  * Check whether a command is pending. If none, set the new command.
303  */
304 static inline int
305 _atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
306 {
307         int ret = rte_atomic32_cmpset(&vf->pend_cmd,
308                         VIRTCHNL_OP_UNKNOWN, ops);
309
310         if (!ret)
311                 PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
312
313         return !ret;
314 }
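/* Typical command round-trip built on the two helpers above; this is
 * only an illustration of the flow in i40evf_execute_vf_cmd() below:
 *
 *   if (_atomic_set_cmd(vf, ops))      claim the single pending slot
 *           return -1;                 another command still pending
 *   i40e_aq_send_msg_to_pf(hw, ops, ...);
 *   poll until the reply arrives or the retry budget is exhausted;
 *   _clear_cmd(vf);                    release the slot
 *
 * The rte_atomic32_cmpset()/rte_wmb() pair keeps the slot consistent
 * when the alarm handler completes a command concurrently.
 */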
315
316 #define MAX_TRY_TIMES 200
317 #define ASQ_DELAY_MS  10
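/* Worst case, the synchronous wait below lasts MAX_TRY_TIMES *
 * ASQ_DELAY_MS = 200 * 10 ms = 2 s before a command is declared
 * unanswered. */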
318
319 static int
320 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
321 {
322         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
323         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
324         struct i40evf_arq_msg_info info;
325         enum i40evf_aq_result ret;
326         int err, i = 0;
327
328         if (_atomic_set_cmd(vf, args->ops))
329                 return -1;
330
331         info.msg = args->out_buffer;
332         info.buf_len = args->out_size;
333         info.ops = VIRTCHNL_OP_UNKNOWN;
334         info.result = I40E_SUCCESS;
335
336         err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
337                      args->in_args, args->in_args_size, NULL);
338         if (err) {
339                 PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
340                 _clear_cmd(vf);
341                 return err;
342         }
343
344         switch (args->ops) {
345         case VIRTCHNL_OP_RESET_VF:
346                 /* no need to process in this function */
347                 err = 0;
348                 break;
349         case VIRTCHNL_OP_VERSION:
350         case VIRTCHNL_OP_GET_VF_RESOURCES:
351                 /* for init adminq commands, need to poll the response */
352                 err = -1;
353                 do {
354                         ret = i40evf_read_pfmsg(dev, &info);
355                         vf->cmd_retval = info.result;
356                         if (ret == I40EVF_MSG_CMD) {
357                                 err = 0;
358                                 break;
359                         } else if (ret == I40EVF_MSG_ERR)
360                                 break;
361                         rte_delay_ms(ASQ_DELAY_MS);
362                         /* If no msg was read, or a sys event was read, keep polling */
363                 } while (i++ < MAX_TRY_TIMES);
364                 _clear_cmd(vf);
365                 break;
366         case VIRTCHNL_OP_REQUEST_QUEUES:
367                 /**
368                  * Ignore the async reply; wait only for the system msg.
369                  * vf_reset = true on VIRTCHNL_EVENT_RESET_IMPENDING;
370                  * otherwise the queue request failed.
371                  */
372                 err = -1;
373                 do {
374                         ret = i40evf_read_pfmsg(dev, &info);
375                         vf->cmd_retval = info.result;
376                         if (ret == I40EVF_MSG_SYS && vf->vf_reset) {
377                                 err = 0;
378                                 break;
379                         } else if (ret == I40EVF_MSG_ERR ||
380                                            ret == I40EVF_MSG_CMD) {
381                                 break;
382                         }
383                         rte_delay_ms(ASQ_DELAY_MS);
384                         /* If no msg was read, or a sys event was read, keep polling */
385                 } while (i++ < MAX_TRY_TIMES);
386                 _clear_cmd(vf);
387                 break;
388
389         default:
390                 /* for other adminq commands at runtime, wait for the cmd-done flag */
391                 err = -1;
392                 do {
393                         if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
394                                 err = 0;
395                                 break;
396                         }
397                         rte_delay_ms(ASQ_DELAY_MS);
398                         /* If no msg was read, or a sys event was read, keep polling */
399                 } while (i++ < MAX_TRY_TIMES);
400                 /* If no response was received, clear the pending command */
401                 if (i >= MAX_TRY_TIMES) {
402                         PMD_DRV_LOG(WARNING, "No response for %d", args->ops);
403                         _clear_cmd(vf);
404                 }
405                 break;
406         }
407
408         return err | vf->cmd_retval;
409 }
410
411 /*
412  * Check API version; wait synchronously until it is read or the read fails
413  */
414 static int
415 i40evf_check_api_version(struct rte_eth_dev *dev)
416 {
417         struct virtchnl_version_info version, *pver;
418         int err;
419         struct vf_cmd_info args;
420         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
421
422         version.major = VIRTCHNL_VERSION_MAJOR;
423         version.minor = VIRTCHNL_VERSION_MINOR;
424
425         args.ops = VIRTCHNL_OP_VERSION;
426         args.in_args = (uint8_t *)&version;
427         args.in_args_size = sizeof(version);
428         args.out_buffer = vf->aq_resp;
429         args.out_size = I40E_AQ_BUF_SZ;
430
431         err = i40evf_execute_vf_cmd(dev, &args);
432         if (err) {
433                 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
434                 return err;
435         }
436
437         pver = (struct virtchnl_version_info *)args.out_buffer;
438         vf->version_major = pver->major;
439         vf->version_minor = pver->minor;
440         if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
441                 (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
442                 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
443         else {
444                 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
445                                         vf->version_major, vf->version_minor,
446                                                 VIRTCHNL_VERSION_MAJOR,
447                                                 VIRTCHNL_VERSION_MINOR);
448                 return -1;
449         }
450
451         return 0;
452 }
453
454 static int
455 i40evf_get_vf_resource(struct rte_eth_dev *dev)
456 {
457         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
458         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
459         int err;
460         struct vf_cmd_info args;
461         uint32_t caps, len;
462
463         args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
464         args.out_buffer = vf->aq_resp;
465         args.out_size = I40E_AQ_BUF_SZ;
466         if (PF_IS_V11(vf)) {
467                 caps = VIRTCHNL_VF_OFFLOAD_L2 |
468                        VIRTCHNL_VF_OFFLOAD_RSS_AQ |
469                        VIRTCHNL_VF_OFFLOAD_RSS_REG |
470                        VIRTCHNL_VF_OFFLOAD_VLAN |
471                        VIRTCHNL_VF_OFFLOAD_RX_POLLING;
472                 args.in_args = (uint8_t *)&caps;
473                 args.in_args_size = sizeof(caps);
474         } else {
475                 args.in_args = NULL;
476                 args.in_args_size = 0;
477         }
478         err = i40evf_execute_vf_cmd(dev, &args);
479
480         if (err) {
481                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
482                 return err;
483         }
484
485         len =  sizeof(struct virtchnl_vf_resource) +
486                 I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
487
488         rte_memcpy(vf->vf_res, args.out_buffer,
489                         RTE_MIN(args.out_size, len));
490         i40e_vf_parse_hw_config(hw, vf->vf_res);
491
492         return 0;
493 }
494
495 static int
496 i40evf_config_promisc(struct rte_eth_dev *dev,
497                       bool enable_unicast,
498                       bool enable_multicast)
499 {
500         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
501         int err;
502         struct vf_cmd_info args;
503         struct virtchnl_promisc_info promisc;
504
505         promisc.flags = 0;
506         promisc.vsi_id = vf->vsi_res->vsi_id;
507
508         if (enable_unicast)
509                 promisc.flags |= FLAG_VF_UNICAST_PROMISC;
510
511         if (enable_multicast)
512                 promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
513
514         args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
515         args.in_args = (uint8_t *)&promisc;
516         args.in_args_size = sizeof(promisc);
517         args.out_buffer = vf->aq_resp;
518         args.out_size = I40E_AQ_BUF_SZ;
519
520         err = i40evf_execute_vf_cmd(dev, &args);
521
522         if (err)
523                 PMD_DRV_LOG(ERR, "fail to execute command "
524                             "CONFIG_PROMISCUOUS_MODE");
525         return err;
526 }
527
528 static int
529 i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
530 {
531         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
532         struct vf_cmd_info args;
533         int ret;
534
535         memset(&args, 0, sizeof(args));
536         args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
537         args.in_args = NULL;
538         args.in_args_size = 0;
539         args.out_buffer = vf->aq_resp;
540         args.out_size = I40E_AQ_BUF_SZ;
541         ret = i40evf_execute_vf_cmd(dev, &args);
542         if (ret)
543                 PMD_DRV_LOG(ERR, "Failed to execute command of "
544                             "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");
545
546         return ret;
547 }
548
549 static int
550 i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
551 {
552         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
553         struct vf_cmd_info args;
554         int ret;
555
556         memset(&args, 0, sizeof(args));
557         args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
558         args.in_args = NULL;
559         args.in_args_size = 0;
560         args.out_buffer = vf->aq_resp;
561         args.out_size = I40E_AQ_BUF_SZ;
562         ret = i40evf_execute_vf_cmd(dev, &args);
563         if (ret)
564                 PMD_DRV_LOG(ERR, "Failed to execute command of "
565                             "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");
566
567         return ret;
568 }
569
570 static void
571 i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
572                                   uint16_t vsi_id,
573                                   uint16_t queue_id,
574                                   uint16_t nb_txq,
575                                   struct i40e_tx_queue *txq)
576 {
577         txq_info->vsi_id = vsi_id;
578         txq_info->queue_id = queue_id;
579         if (queue_id < nb_txq && txq) {
580                 txq_info->ring_len = txq->nb_tx_desc;
581                 txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
582         }
583 }
584
585 static void
586 i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
587                                   uint16_t vsi_id,
588                                   uint16_t queue_id,
589                                   uint16_t nb_rxq,
590                                   uint32_t max_pkt_size,
591                                   struct i40e_rx_queue *rxq)
592 {
593         rxq_info->vsi_id = vsi_id;
594         rxq_info->queue_id = queue_id;
595         rxq_info->max_pkt_size = max_pkt_size;
596         if (queue_id < nb_rxq && rxq) {
597                 rxq_info->ring_len = rxq->nb_rx_desc;
598                 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
599                 rxq_info->databuffer_size =
600                         (rte_pktmbuf_data_room_size(rxq->mp) -
601                                 RTE_PKTMBUF_HEADROOM);
602         }
603 }
604
605 static int
606 i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
607 {
608         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
609         struct i40e_rx_queue **rxq =
610                 (struct i40e_rx_queue **)dev->data->rx_queues;
611         struct i40e_tx_queue **txq =
612                 (struct i40e_tx_queue **)dev->data->tx_queues;
613         struct virtchnl_vsi_queue_config_info *vc_vqci;
614         struct virtchnl_queue_pair_info *vc_qpi;
615         struct vf_cmd_info args;
616         uint16_t i, nb_qp = vf->num_queue_pairs;
617         const uint32_t size =
618                 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
619         uint8_t buff[size];
620         int ret;
621
622         memset(buff, 0, sizeof(buff));
623         vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
624         vc_vqci->vsi_id = vf->vsi_res->vsi_id;
625         vc_vqci->num_queue_pairs = nb_qp;
626
627         for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
628                 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
629                         vc_vqci->vsi_id, i, dev->data->nb_tx_queues,
630                         txq ? txq[i] : NULL);
631                 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
632                         vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
633                         vf->max_pkt_len, rxq ? rxq[i] : NULL);
634         }
635         memset(&args, 0, sizeof(args));
636         args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
637         args.in_args = (uint8_t *)vc_vqci;
638         args.in_args_size = size;
639         args.out_buffer = vf->aq_resp;
640         args.out_size = I40E_AQ_BUF_SZ;
641         ret = i40evf_execute_vf_cmd(dev, &args);
642         if (ret)
643                 PMD_DRV_LOG(ERR, "Failed to execute command of "
644                         "VIRTCHNL_OP_CONFIG_VSI_QUEUES");
645
646         return ret;
647 }
648
649 static int
650 i40evf_config_irq_map(struct rte_eth_dev *dev)
651 {
652         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
653         struct vf_cmd_info args;
654         uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
655                 sizeof(struct virtchnl_vector_map) * dev->data->nb_rx_queues];
656         struct virtchnl_irq_map_info *map_info;
657         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
658         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
659         uint32_t vector_id;
660         int i, err;
661
662         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
663             rte_intr_allow_others(intr_handle))
664                 vector_id = I40E_RX_VEC_START;
665         else
666                 vector_id = I40E_MISC_VEC_ID;
667
668         map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
669         map_info->num_vectors = dev->data->nb_rx_queues;
670         for (i = 0; i < dev->data->nb_rx_queues; i++) {
671                 map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
672                 map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id;
673                 /* Always use default dynamic MSIX interrupt */
674                 map_info->vecmap[i].vector_id = vector_id;
675                 /* Don't map any tx queue */
676                 map_info->vecmap[i].txq_map = 0;
677                 map_info->vecmap[i].rxq_map = 1 << i;
678                 if (rte_intr_dp_is_en(intr_handle))
679                         intr_handle->intr_vec[i] = vector_id;
680                 if (vector_id > I40E_MISC_VEC_ID)
681                         vector_id++;
682                 if (vector_id >= vf->vf_res->max_vectors)
683                         vector_id = I40E_RX_VEC_START;
684         }
685
686         args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
687         args.in_args = (u8 *)cmd_buffer;
688         args.in_args_size = sizeof(cmd_buffer);
689         args.out_buffer = vf->aq_resp;
690         args.out_size = I40E_AQ_BUF_SZ;
691         err = i40evf_execute_vf_cmd(dev, &args);
692         if (err)
693                 PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
694
695         return err;
696 }
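/* Sketch of the mapping the loop above produces, assuming Rx queue
 * interrupts are enabled and the PF granted max_vectors = 4 (so the
 * usable RX vectors are I40E_RX_VEC_START..3):
 *
 *   rxq 0 -> vector 1, rxq 1 -> vector 2, rxq 2 -> vector 3,
 *   rxq 3 -> vector 1, ...  (round-robin, wrapping back to
 *                            I40E_RX_VEC_START and never reusing the
 *                            misc vector I40E_MISC_VEC_ID)
 *
 * This per-queue round-robin is what lets each Rx queue raise its own
 * MSI-X interrupt instead of all queues sharing a single vector.
 */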
697
698 static int
699 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
700                                 bool on)
701 {
702         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
703         struct virtchnl_queue_select queue_select;
704         int err;
705         struct vf_cmd_info args;
706         memset(&queue_select, 0, sizeof(queue_select));
707         queue_select.vsi_id = vf->vsi_res->vsi_id;
708
709         if (isrx)
710                 queue_select.rx_queues |= 1 << qid;
711         else
712                 queue_select.tx_queues |= 1 << qid;
713
714         if (on)
715                 args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
716         else
717                 args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
718         args.in_args = (u8 *)&queue_select;
719         args.in_args_size = sizeof(queue_select);
720         args.out_buffer = vf->aq_resp;
721         args.out_size = I40E_AQ_BUF_SZ;
722         err = i40evf_execute_vf_cmd(dev, &args);
723         if (err)
724                 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
725                             isrx ? "RX" : "TX", qid, on ? "on" : "off");
726
727         return err;
728 }
729
730 static int
731 i40evf_start_queues(struct rte_eth_dev *dev)
732 {
733         struct rte_eth_dev_data *dev_data = dev->data;
734         int i;
735         struct i40e_rx_queue *rxq;
736         struct i40e_tx_queue *txq;
737
738         for (i = 0; i < dev->data->nb_rx_queues; i++) {
739                 rxq = dev_data->rx_queues[i];
740                 if (rxq->rx_deferred_start)
741                         continue;
742                 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
743                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
744                         return -1;
745                 }
746         }
747
748         for (i = 0; i < dev->data->nb_tx_queues; i++) {
749                 txq = dev_data->tx_queues[i];
750                 if (txq->tx_deferred_start)
751                         continue;
752                 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
753                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
754                         return -1;
755                 }
756         }
757
758         return 0;
759 }
760
761 static int
762 i40evf_stop_queues(struct rte_eth_dev *dev)
763 {
764         int i;
765
766         /* Stop TX queues first */
767         for (i = 0; i < dev->data->nb_tx_queues; i++) {
768                 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
769                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
770                         return -1;
771                 }
772         }
773
774         /* Then stop RX queues */
775         for (i = 0; i < dev->data->nb_rx_queues; i++) {
776                 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
777                         PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
778                         return -1;
779                 }
780         }
781
782         return 0;
783 }
784
785 static int
786 i40evf_add_mac_addr(struct rte_eth_dev *dev,
787                     struct rte_ether_addr *addr,
788                     __rte_unused uint32_t index,
789                     __rte_unused uint32_t pool)
790 {
791         struct virtchnl_ether_addr_list *list;
792         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
793         uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
794                         sizeof(struct virtchnl_ether_addr)];
795         int err;
796         struct vf_cmd_info args;
797
798         if (rte_is_zero_ether_addr(addr)) {
799                 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
800                             addr->addr_bytes[0], addr->addr_bytes[1],
801                             addr->addr_bytes[2], addr->addr_bytes[3],
802                             addr->addr_bytes[4], addr->addr_bytes[5]);
803                 return I40E_ERR_INVALID_MAC_ADDR;
804         }
805
806         list = (struct virtchnl_ether_addr_list *)cmd_buffer;
807         list->vsi_id = vf->vsi_res->vsi_id;
808         list->num_elements = 1;
809         rte_memcpy(list->list[0].addr, addr->addr_bytes,
810                                         sizeof(addr->addr_bytes));
811
812         args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
813         args.in_args = cmd_buffer;
814         args.in_args_size = sizeof(cmd_buffer);
815         args.out_buffer = vf->aq_resp;
816         args.out_size = I40E_AQ_BUF_SZ;
817         err = i40evf_execute_vf_cmd(dev, &args);
818         if (err)
819                 PMD_DRV_LOG(ERR, "fail to execute command "
820                             "OP_ADD_ETHER_ADDRESS");
821         else
822                 vf->vsi.mac_num++;
823
824         return err;
825 }
826
827 static void
828 i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
829                             struct rte_ether_addr *addr)
830 {
831         struct virtchnl_ether_addr_list *list;
832         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
833         uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
834                         sizeof(struct virtchnl_ether_addr)];
835         int err;
836         struct vf_cmd_info args;
837
838         if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
839                 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
840                             addr->addr_bytes[0], addr->addr_bytes[1],
841                             addr->addr_bytes[2], addr->addr_bytes[3],
842                             addr->addr_bytes[4], addr->addr_bytes[5]);
843                 return;
844         }
845
846         list = (struct virtchnl_ether_addr_list *)cmd_buffer;
847         list->vsi_id = vf->vsi_res->vsi_id;
848         list->num_elements = 1;
849         rte_memcpy(list->list[0].addr, addr->addr_bytes,
850                         sizeof(addr->addr_bytes));
851
852         args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
853         args.in_args = cmd_buffer;
854         args.in_args_size = sizeof(cmd_buffer);
855         args.out_buffer = vf->aq_resp;
856         args.out_size = I40E_AQ_BUF_SZ;
857         err = i40evf_execute_vf_cmd(dev, &args);
858         if (err)
859                 PMD_DRV_LOG(ERR, "fail to execute command "
860                             "OP_DEL_ETHER_ADDRESS");
861         else
862                 vf->vsi.mac_num--;
863         return;
864 }
865
866 static void
867 i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
868 {
869         struct rte_eth_dev_data *data = dev->data;
870         struct rte_ether_addr *addr;
871
872         addr = &data->mac_addrs[index];
873
874         i40evf_del_mac_addr_by_addr(dev, addr);
875 }
876
877 static int
878 i40evf_query_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
879 {
880         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
881         struct virtchnl_queue_select q_stats;
882         int err;
883         struct vf_cmd_info args;
884
885         memset(&q_stats, 0, sizeof(q_stats));
886         q_stats.vsi_id = vf->vsi_res->vsi_id;
887         args.ops = VIRTCHNL_OP_GET_STATS;
888         args.in_args = (u8 *)&q_stats;
889         args.in_args_size = sizeof(q_stats);
890         args.out_buffer = vf->aq_resp;
891         args.out_size = I40E_AQ_BUF_SZ;
892
893         err = i40evf_execute_vf_cmd(dev, &args);
894         if (err) {
895                 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
896                 *pstats = NULL;
897                 return err;
898         }
899         *pstats = (struct i40e_eth_stats *)args.out_buffer;
900         return 0;
901 }
902
903 static void
904 i40evf_stat_update_48(uint64_t *offset,
905                    uint64_t *stat)
906 {
907         if (*stat >= *offset)
908                 *stat = *stat - *offset;
909         else
910                 *stat = (uint64_t)((*stat +
911                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
912
913         *stat &= I40E_48_BIT_MASK;
914 }
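/* Worked example for the rollover branch above, with illustrative
 * values: offset = 0xFFFFFFFFFFF0, new read = 0x10. Since stat <
 * offset, stat = (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 units
 * were counted across the 48-bit wrap; the final mask keeps the result
 * within the counter width. */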
915
916 static void
917 i40evf_stat_update_32(uint64_t *offset,
918                    uint64_t *stat)
919 {
920         if (*stat >= *offset)
921                 *stat = (uint64_t)(*stat - *offset);
922         else
923                 *stat = (uint64_t)((*stat +
924                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
925 }
926
927 static void
928 i40evf_update_stats(struct i40e_vsi *vsi,
929                                         struct i40e_eth_stats *nes)
930 {
931         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
932
933         i40evf_stat_update_48(&oes->rx_bytes,
934                             &nes->rx_bytes);
935         i40evf_stat_update_48(&oes->rx_unicast,
936                             &nes->rx_unicast);
937         i40evf_stat_update_48(&oes->rx_multicast,
938                             &nes->rx_multicast);
939         i40evf_stat_update_48(&oes->rx_broadcast,
940                             &nes->rx_broadcast);
941         i40evf_stat_update_32(&oes->rx_discards,
942                                 &nes->rx_discards);
943         i40evf_stat_update_32(&oes->rx_unknown_protocol,
944                             &nes->rx_unknown_protocol);
945         i40evf_stat_update_48(&oes->tx_bytes,
946                             &nes->tx_bytes);
947         i40evf_stat_update_48(&oes->tx_unicast,
948                             &nes->tx_unicast);
949         i40evf_stat_update_48(&oes->tx_multicast,
950                             &nes->tx_multicast);
951         i40evf_stat_update_48(&oes->tx_broadcast,
952                             &nes->tx_broadcast);
953         i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
954         i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
955 }
956
957 static int
958 i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
959 {
960         int ret;
961         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
962         struct i40e_eth_stats *pstats = NULL;
963
964         /* read stat values to clear hardware registers */
965         ret = i40evf_query_stats(dev, &pstats);
966
967         /* set the stats offset based on current values */
968         if (ret == 0)
969                 vf->vsi.eth_stats_offset = *pstats;
970
971         return ret;
972 }
973
974 static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
975                                       struct rte_eth_xstat_name *xstats_names,
976                                       __rte_unused unsigned limit)
977 {
978         unsigned i;
979
980         if (xstats_names != NULL)
981                 for (i = 0; i < I40EVF_NB_XSTATS; i++) {
982                         snprintf(xstats_names[i].name,
983                                 sizeof(xstats_names[i].name),
984                                 "%s", rte_i40evf_stats_strings[i].name);
985                 }
986         return I40EVF_NB_XSTATS;
987 }
988
989 static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
990                                  struct rte_eth_xstat *xstats, unsigned n)
991 {
992         int ret;
993         unsigned i;
994         struct i40e_eth_stats *pstats = NULL;
995         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
996         struct i40e_vsi *vsi = &vf->vsi;
997
998         if (n < I40EVF_NB_XSTATS)
999                 return I40EVF_NB_XSTATS;
1000
1001         ret = i40evf_query_stats(dev, &pstats);
1002         if (ret != 0)
1003                 return 0;
1004
1005         if (!xstats)
1006                 return 0;
1007
1008         i40evf_update_stats(vsi, pstats);
1009
1010         /* loop over xstats array and values from pstats */
1011         for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1012                 xstats[i].id = i;
1013                 xstats[i].value = *(uint64_t *)(((char *)pstats) +
1014                         rte_i40evf_stats_strings[i].offset);
1015         }
1016
1017         return I40EVF_NB_XSTATS;
1018 }
1019
1020 static int
1021 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1022 {
1023         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1024         struct virtchnl_vlan_filter_list *vlan_list;
1025         uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1026                                                         sizeof(uint16_t)];
1027         int err;
1028         struct vf_cmd_info args;
1029
1030         vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1031         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1032         vlan_list->num_elements = 1;
1033         vlan_list->vlan_id[0] = vlanid;
1034
1035         args.ops = VIRTCHNL_OP_ADD_VLAN;
1036         args.in_args = (u8 *)&cmd_buffer;
1037         args.in_args_size = sizeof(cmd_buffer);
1038         args.out_buffer = vf->aq_resp;
1039         args.out_size = I40E_AQ_BUF_SZ;
1040         err = i40evf_execute_vf_cmd(dev, &args);
1041         if (err)
1042                 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
1043
1044         return err;
1045 }
1046
1047 static int
1048 i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num)
1049 {
1050         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1051         struct virtchnl_vf_res_request vfres;
1052         struct vf_cmd_info args;
1053         int err;
1054
1055         vfres.num_queue_pairs = num;
1056
1057         args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
1058         args.in_args = (u8 *)&vfres;
1059         args.in_args_size = sizeof(vfres);
1060         args.out_buffer = vf->aq_resp;
1061         args.out_size = I40E_AQ_BUF_SZ;
1062
1063         rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
1064         err = i40evf_execute_vf_cmd(dev, &args);
1065         if (err)
1066                 PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
1067
1068         rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
1069                           i40evf_dev_alarm_handler, dev);
1070         return err;
1071 }
1072
1073 static int
1074 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1075 {
1076         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1077         struct virtchnl_vlan_filter_list *vlan_list;
1078         uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1079                                                         sizeof(uint16_t)];
1080         int err;
1081         struct vf_cmd_info args;
1082
1083         vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1084         vlan_list->vsi_id = vf->vsi_res->vsi_id;
1085         vlan_list->num_elements = 1;
1086         vlan_list->vlan_id[0] = vlanid;
1087
1088         args.ops = VIRTCHNL_OP_DEL_VLAN;
1089         args.in_args = (u8 *)&cmd_buffer;
1090         args.in_args_size = sizeof(cmd_buffer);
1091         args.out_buffer = vf->aq_resp;
1092         args.out_size = I40E_AQ_BUF_SZ;
1093         err = i40evf_execute_vf_cmd(dev, &args);
1094         if (err)
1095                 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
1096
1097         return err;
1098 }
1099
1100 static const struct rte_pci_id pci_id_i40evf_map[] = {
1101         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
1102         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
1103         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
1104         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
1105         { .vendor_id = 0, /* sentinel */ },
1106 };
1107
1108 /* Disable IRQ0 */
1109 static inline void
1110 i40evf_disable_irq0(struct i40e_hw *hw)
1111 {
1112         /* Disable all interrupt types */
1113         I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
1114         I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1115                        I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1116         I40EVF_WRITE_FLUSH(hw);
1117 }
1118
1119 /* Enable IRQ0 */
1120 static inline void
1121 i40evf_enable_irq0(struct i40e_hw *hw)
1122 {
1123         /* Enable admin queue interrupt trigger */
1124         uint32_t val;
1125
1126         i40evf_disable_irq0(hw);
1127         val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1);
1128         val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK |
1129                 I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK;
1130         I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val);
1131
1132         I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1133                 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1134                 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1135                 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1136
1137         I40EVF_WRITE_FLUSH(hw);
1138 }
1139
1140 static int
1141 i40evf_check_vf_reset_done(struct rte_eth_dev *dev)
1142 {
1143         int i, reset;
1144         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1145         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1146
1147         for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
1148                 reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
1149                         I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1150                 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
1151                 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1152                     reset == VIRTCHNL_VFR_COMPLETED)
1153                         break;
1154                 rte_delay_ms(50);
1155         }
1156
1157         if (i >= MAX_RESET_WAIT_CNT)
1158                 return -1;
1159
1160         vf->vf_reset = false;
1161         vf->pend_msg &= ~PFMSG_RESET_IMPENDING;
1162
1163         return 0;
1164 }
1165 static int
1166 i40evf_reset_vf(struct rte_eth_dev *dev)
1167 {
1168         int ret;
1169         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1170
1171         if (i40e_vf_reset(hw) != I40E_SUCCESS) {
1172                 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1173                 return -1;
1174         }
1175         /**
1176           * After the VF issues a reset command to the PF, the PF does not
1177           * necessarily reset the VF; that depends on the VF's current
1178           * state. If the VF is not initialized yet, no reset is performed;
1179           * otherwise the PF tries to reset it. Even when the VF is reset,
1180           * the PF sets I40E_VFGEN_RSTAT to COMPLETE first, waits 10 ms,
1181           * then sets it to ACTIVE. The VF may miss the moment COMPLETE is
1182           * set, so the VF waits for a comparatively long time here.
1183           */
1184         rte_delay_ms(200);
1185
1186         ret = i40evf_check_vf_reset_done(dev);
1187         if (ret) {
1188                 PMD_INIT_LOG(ERR, "VF is still resetting");
1189                 return ret;
1190         }
1191
1192         return 0;
1193 }
1194
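/* Summary of the VF init sequence implemented below (a restatement of
 * the code, not new behavior): check the reset state -> init the
 * adminq -> reset the VF -> re-init the adminq -> negotiate the API
 * version -> fetch VF resources -> pick the SRIOV VSI -> program the
 * ITR registers and store (or randomly generate) the MAC address. */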
1195 static int
1196 i40evf_init_vf(struct rte_eth_dev *dev)
1197 {
1198         int i, err, bufsz;
1199         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1200         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1201         uint16_t interval =
1202                 i40e_calc_itr_interval(0, 0);
1203
1204         vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1205         vf->dev_data = dev->data;
1206         err = i40e_set_mac_type(hw);
1207         if (err) {
1208                 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1209                 goto err;
1210         }
1211
1212         err = i40evf_check_vf_reset_done(dev);
1213         if (err)
1214                 goto err;
1215
1216         i40e_init_adminq_parameter(hw);
1217         err = i40e_init_adminq(hw);
1218         if (err) {
1219                 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1220                 goto err;
1221         }
1222
1223         /* Reset VF and wait until it's complete */
1224         if (i40evf_reset_vf(dev)) {
1225                 PMD_INIT_LOG(ERR, "reset NIC failed");
1226                 goto err_aq;
1227         }
1228
1229         /* VF was reset; shut down the admin queue and initialize it again */
1230         if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1231                 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1232                 goto err;
1233         }
1234
1235         i40e_init_adminq_parameter(hw);
1236         if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1237                 PMD_INIT_LOG(ERR, "init_adminq failed");
1238                 goto err;
1239         }
1240
1241         vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
1242         if (!vf->aq_resp) {
1243                 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
1244                 goto err_aq;
1245         }
1246         if (i40evf_check_api_version(dev) != 0) {
1247                 PMD_INIT_LOG(ERR, "check_api_version failed");
1248                 goto err_api;
1249         }
1250         bufsz = sizeof(struct virtchnl_vf_resource) +
1251                 (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
1252         vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1253         if (!vf->vf_res) {
1254                 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1255                 goto err_api;
1256         }
1257
1258         if (i40evf_get_vf_resource(dev) != 0) {
1259                 PMD_INIT_LOG(ERR, "i40evf_get_vf_resource failed");
1260                 goto err_alloc;
1261         }
1262
1263         /* got VF config message back from PF, now we can parse it */
1264         for (i = 0; i < vf->vf_res->num_vsis; i++) {
1265                 if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
1266                         vf->vsi_res = &vf->vf_res->vsi_res[i];
1267         }
1268
1269         if (!vf->vsi_res) {
1270                 PMD_INIT_LOG(ERR, "no LAN VSI found");
1271                 goto err_alloc;
1272         }
1273
1274         if (hw->mac.type == I40E_MAC_X722_VF)
1275                 vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
1276         vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1277
1278         switch (vf->vsi_res->vsi_type) {
1279         case VIRTCHNL_VSI_SRIOV:
1280                 vf->vsi.type = I40E_VSI_SRIOV;
1281                 break;
1282         default:
1283                 vf->vsi.type = I40E_VSI_TYPE_UNKNOWN;
1284                 break;
1285         }
1286         vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1287         vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1288
1289         /* Store the MAC address configured by host, or generate random one */
1290         if (rte_is_valid_assigned_ether_addr(
1291                         (struct rte_ether_addr *)hw->mac.addr))
1292                 vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
1293         else
1294                 rte_eth_random_addr(hw->mac.addr); /* Generate a random one */
1295
1296         I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1297                        (I40E_ITR_INDEX_DEFAULT <<
1298                         I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1299                        (interval <<
1300                         I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
1301         I40EVF_WRITE_FLUSH(hw);
1302
1303         return 0;
1304
1305 err_alloc:
1306         rte_free(vf->vf_res);
1307         vf->vsi_res = NULL;
1308 err_api:
1309         rte_free(vf->aq_resp);
1310 err_aq:
1311         i40e_shutdown_adminq(hw); /* ignore error */
1312 err:
1313         return -1;
1314 }
1315
1316 static int
1317 i40evf_uninit_vf(struct rte_eth_dev *dev)
1318 {
1319         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1320
1321         PMD_INIT_FUNC_TRACE();
1322
1323         if (hw->adapter_closed == 0)
1324                 i40evf_dev_close(dev);
1325
1326         return 0;
1327 }
1328
1329 static void
1330 i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
1331                 __rte_unused uint16_t msglen)
1332 {
1333         struct virtchnl_pf_event *pf_msg =
1334                         (struct virtchnl_pf_event *)msg;
1335         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1336
1337         switch (pf_msg->event) {
1338         case VIRTCHNL_EVENT_RESET_IMPENDING:
1339                 PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
1340                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1341                                               NULL);
1342                 break;
1343         case VIRTCHNL_EVENT_LINK_CHANGE:
1344                 PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
1345                 vf->link_up = pf_msg->event_data.link_event.link_status;
1346                 vf->link_speed = pf_msg->event_data.link_event.link_speed;
1347                 break;
1348         case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
1349                 PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
1350                 break;
1351         default:
1352                 PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
1353                 break;
1354         }
1355 }
1356
1357 static void
1358 i40evf_handle_aq_msg(struct rte_eth_dev *dev)
1359 {
1360         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1361         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1362         struct i40e_arq_event_info info;
1363         uint16_t pending, aq_opc;
1364         enum virtchnl_ops msg_opc;
1365         enum i40e_status_code msg_ret;
1366         int ret;
1367
1368         info.buf_len = I40E_AQ_BUF_SZ;
1369         if (!vf->aq_resp) {
1370                 PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
1371                 return;
1372         }
1373         info.msg_buf = vf->aq_resp;
1374
1375         pending = 1;
1376         while (pending) {
1377                 ret = i40e_clean_arq_element(hw, &info, &pending);
1378
1379                 if (ret != I40E_SUCCESS) {
1380                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
1381                                     " ret: %d", ret);
1382                         break;
1383                 }
1384                 aq_opc = rte_le_to_cpu_16(info.desc.opcode);
1385                 /* For a message sent from PF to VF, the opcode is stored in
1386                  * cookie_high of struct i40e_aq_desc, while the return error
1387                  * code is stored in cookie_low; this is done by
1388                  * i40e_aq_send_msg_to_vf in the PF driver. */
1389                 msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
1390                                                   info.desc.cookie_high);
1391                 msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
1392                                                   info.desc.cookie_low);
1393                 switch (aq_opc) {
1394                 case i40e_aqc_opc_send_msg_to_vf:
1395                         if (msg_opc == VIRTCHNL_OP_EVENT)
1396                                 /* process event */
1397                                 i40evf_handle_pf_event(dev, info.msg_buf,
1398                                                        info.msg_len);
1399                         else {
1400                                 /* check whether the message matches the pending command */
1401                                 if (msg_opc == vf->pend_cmd) {
1402                                         vf->cmd_retval = msg_ret;
1403                                         /* prevent compiler reordering */
1404                                         rte_compiler_barrier();
1405                                         _clear_cmd(vf);
1406                                 } else
1407                                         PMD_DRV_LOG(ERR, "command mismatch, "
1408                                                 "expect %u, get %u",
1409                                                 vf->pend_cmd, msg_opc);
1410                                 PMD_DRV_LOG(DEBUG, "adminq response is received,"
1411                                              " opcode = %d", msg_opc);
1412                         }
1413                         break;
1414                 default:
1415                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1416                                     aq_opc);
1417                         break;
1418                 }
1419         }
1420 }
1421
1422 /**
1423  * Alarm callback that periodically polls the VF admin queue in place of
1424  * a device interrupt. Only the adminq cause is processed in the VF.
1425  *
1426  * @param param
1427  *  The address of the parameter (struct rte_eth_dev *) registered before.
1428  *
1429  * @return
1430  *  void
1431  */
1434 static void
1435 i40evf_dev_alarm_handler(void *param)
1436 {
1437         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1439         uint32_t icr0;
1440
1441         i40evf_disable_irq0(hw);
1442
1443         /* read out interrupt causes */
1444         icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
1445
1446         /* No interrupt event indicated */
1447         if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK))
1448                 goto done;
1449
1450         if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
1451                 PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
1452                 i40evf_handle_aq_msg(dev);
1453         }
1454
1455         /* Link Status Change interrupt */
1456         if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
1457                 PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
1458                                    " do nothing");
1459
1460 done:
1461         i40evf_enable_irq0(hw);
1462         rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
1463                           i40evf_dev_alarm_handler, dev);
1464 }
1465
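/*
 * Ethdev init entry point. The primary process wires up the ops and burst
 * function pointers, brings the VF up over virtchnl (i40evf_init_vf) and
 * arms the periodic alarm that stands in for the admin-queue interrupt.
 */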
1466 static int
1467 i40evf_dev_init(struct rte_eth_dev *eth_dev)
1468 {
1469         struct i40e_hw *hw
1470                 = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1471         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1472
1473         PMD_INIT_FUNC_TRACE();
1474
1475         /* assign ops func pointer */
1476         eth_dev->dev_ops = &i40evf_eth_dev_ops;
1477         eth_dev->rx_pkt_burst = &i40e_recv_pkts;
1478         eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
1479
1480         /*
1481          * For secondary processes, we don't initialise any further as primary
1482          * has already done this work.
1483          */
1484         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1485                 i40e_set_rx_function(eth_dev);
1486                 i40e_set_tx_function(eth_dev);
1487                 return 0;
1488         }
1489         i40e_set_default_ptype_table(eth_dev);
1490         rte_eth_copy_pci_info(eth_dev, pci_dev);
1491
1492         hw->vendor_id = pci_dev->id.vendor_id;
1493         hw->device_id = pci_dev->id.device_id;
1494         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1495         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1496         hw->bus.device = pci_dev->addr.devid;
1497         hw->bus.func = pci_dev->addr.function;
1498         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1499         hw->adapter_stopped = 0;
1500         hw->adapter_closed = 0;
1501
1502         /* Pass the information to the rte_eth_dev_close() that it should also
1503          * release the private port resources.
1504          */
1505         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1506
1507         if (i40evf_init_vf(eth_dev) != 0) {
1508                 PMD_INIT_LOG(ERR, "Init vf failed");
1509                 return -1;
1510         }
1511
1512         i40e_set_default_pctype_table(eth_dev);
1513         rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
1514                           i40evf_dev_alarm_handler, eth_dev);
1515
1516         /* configure and enable device interrupt */
1517         i40evf_enable_irq0(hw);
1518
1519         /* copy mac addr */
1520         eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
1521                                 RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
1522                                 0);
1523         if (eth_dev->data->mac_addrs == NULL) {
1524                 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
1525                                 " store MAC addresses",
1526                                 RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
1527                 return -ENOMEM;
1528         }
1529         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1530                         &eth_dev->data->mac_addrs[0]);
1531
1532         return 0;
1533 }
1534
1535 static int
1536 i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
1537 {
1538         PMD_INIT_FUNC_TRACE();
1539
1540         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1541                 return -EPERM;
1542
1543         if (i40evf_uninit_vf(eth_dev) != 0) {
1544                 PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
1545                 return -1;
1546         }
1547
1548         return 0;
1549 }
1550
1551 static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1552         struct rte_pci_device *pci_dev)
1553 {
1554         return rte_eth_dev_pci_generic_probe(pci_dev,
1555                 sizeof(struct i40e_adapter), i40evf_dev_init);
1556 }
1557
1558 static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
1559 {
1560         return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);
1561 }
1562
1563 /*
1564  * virtual function driver struct
1565  */
1566 static struct rte_pci_driver rte_i40evf_pmd = {
1567         .id_table = pci_id_i40evf_map,
1568         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1569         .probe = eth_i40evf_pci_probe,
1570         .remove = eth_i40evf_pci_remove,
1571 };
1572
1573 RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
1574 RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
1575 RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");
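/*
 * Illustrative usage (not part of the driver): the VF must be bound to
 * igb_uio or vfio-pci before EAL can claim it, e.g. with a hypothetical
 * VF address 0000:02:02.0:
 *
 *   usertools/dpdk-devbind.py --bind=vfio-pci 0000:02:02.0
 */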
1576
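/*
 * If the application configures more queue pairs than the PF originally
 * granted, ask the PF for more (i40evf_request_queues) and then reset the
 * VF so the newly granted resources are re-read during re-initialization.
 */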
1577 static int
1578 i40evf_dev_configure(struct rte_eth_dev *dev)
1579 {
1580         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1581         struct i40e_adapter *ad =
1582                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1583         uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
1584                                 dev->data->nb_tx_queues);
1585
1586         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1587          * allocation or vector Rx preconditions, we will reset it.
1588          */
1589         ad->rx_bulk_alloc_allowed = true;
1590         ad->rx_vec_allowed = true;
1591         ad->tx_simple_allowed = true;
1592         ad->tx_vec_allowed = true;
1593
1594         if (num_queue_pairs > vf->vsi_res->num_queue_pairs) {
1595                 int ret = 0;
1596
1597                 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
1598                             vf->vsi_res->num_queue_pairs, num_queue_pairs);
1599                 ret = i40evf_request_queues(dev, num_queue_pairs);
1600                 if (ret != 0)
1601                         return ret;
1602
1603                 ret = i40evf_dev_reset(dev);
1604                 if (ret != 0)
1605                         return ret;
1606         }
1607
1608         return i40evf_init_vlan(dev);
1609 }
1610
1611 static int
1612 i40evf_init_vlan(struct rte_eth_dev *dev)
1613 {
1614         /* Apply VLAN offload setting */
1615         i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1616
1617         return 0;
1618 }
1619
1620 static int
1621 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1622 {
1623         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1624         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1625
1626         if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1627                 return -ENOTSUP;
1628
1629         /* VLAN stripping setting */
1630         if (mask & ETH_VLAN_STRIP_MASK) {
1631                 /* Enable or disable VLAN stripping */
1632                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1633                         i40evf_enable_vlan_strip(dev);
1634                 else
1635                         i40evf_disable_vlan_strip(dev);
1636         }
1637
1638         return 0;
1639 }
1640
1641 static int
1642 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1643 {
1644         struct i40e_rx_queue *rxq;
1645         int err;
1646         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1647
1648         PMD_INIT_FUNC_TRACE();
1649
1650         rxq = dev->data->rx_queues[rx_queue_id];
1651
1652         err = i40e_alloc_rx_queue_mbufs(rxq);
1653         if (err) {
1654                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1655                 return err;
1656         }
1657
1658         rte_wmb();
1659
1660         /* Init the RX tail register. */
1661         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1662         I40EVF_WRITE_FLUSH(hw);
1663
1664         /* Ready to switch the queue on */
1665         err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1666         if (err) {
1667                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1668                             rx_queue_id);
1669                 return err;
1670         }
1671         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1672
1673         return 0;
1674 }
1675
1676 static int
1677 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1678 {
1679         struct i40e_rx_queue *rxq;
1680         int err;
1681
1682         rxq = dev->data->rx_queues[rx_queue_id];
1683
1684         err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1685         if (err) {
1686                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1687                             rx_queue_id);
1688                 return err;
1689         }
1690
1691         i40e_rx_queue_release_mbufs(rxq);
1692         i40e_reset_rx_queue(rxq);
1693         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1694
1695         return 0;
1696 }
1697
1698 static int
1699 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1700 {
1701         int err;
1702
1703         PMD_INIT_FUNC_TRACE();
1704
1705         /* Ready to switch the queue on */
1706         err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1707         if (err) {
1708                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1709                             tx_queue_id);
1710                 return err;
1711         }
1712         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1713
1714         return 0;
1715 }
1716
1717 static int
1718 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1719 {
1720         struct i40e_tx_queue *txq;
1721         int err;
1722
1723         txq = dev->data->tx_queues[tx_queue_id];
1724
1725         err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1726         if (err) {
1727                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1728                             tx_queue_id);
1729                 return err;
1730         }
1731
1732         i40e_tx_queue_release_mbufs(txq);
1733         i40e_reset_tx_queue(txq);
1734         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1735
1736         return 0;
1737 }
1738
1739 static int
1740 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1741 {
1742         int ret;
1743
1744         if (on)
1745                 ret = i40evf_add_vlan(dev, vlan_id);
1746         else
1747                 ret = i40evf_del_vlan(dev, vlan_id);
1748
1749         return ret;
1750 }
1751
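/*
 * Per-queue Rx setup: program the tail register, then derive the buffer
 * and maximum packet lengths from the mempool's data room. Scattered Rx
 * is switched on when the offload is requested or when max_pkt_len no
 * longer fits in a single buffer.
 */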
1752 static int
1753 i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
1754 {
1755         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1756         struct rte_eth_dev_data *dev_data = dev->data;
1757         struct rte_pktmbuf_pool_private *mbp_priv;
1758         uint16_t buf_size, len;
1759
1760         rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
1761         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1762         I40EVF_WRITE_FLUSH(hw);
1763
1764         /* Calculate the maximum packet length allowed */
1765         mbp_priv = rte_mempool_get_priv(rxq->mp);
1766         buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
1767                                         RTE_PKTMBUF_HEADROOM);
1768         rxq->hs_mode = i40e_header_split_none;
1769         rxq->rx_hdr_len = 0;
1770         rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
1771         len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
1772         rxq->max_pkt_len = RTE_MIN(len,
1773                 dev_data->dev_conf.rxmode.max_rx_pkt_len);
1774
1775         /**
1776          * Check if the jumbo frame and maximum packet length are set correctly
1777          */
1778         if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1779                 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
1780                     rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
1781                         PMD_DRV_LOG(ERR, "maximum packet length must be "
1782                                 "larger than %u and smaller than %u, as jumbo "
1783                                 "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
1784                                         (uint32_t)I40E_FRAME_SIZE_MAX);
1785                         return I40E_ERR_CONFIG;
1786                 }
1787         } else {
1788                 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
1789                     rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
1790                         PMD_DRV_LOG(ERR, "maximum packet length must be "
1791                                 "larger than %u and smaller than %u, as jumbo "
1792                                 "frame is disabled",
1793                                 (uint32_t)RTE_ETHER_MIN_LEN,
1794                                 (uint32_t)RTE_ETHER_MAX_LEN);
1795                         return I40E_ERR_CONFIG;
1796                 }
1797         }
1798
1799         if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
1800             rxq->max_pkt_len > buf_size)
1801                 dev_data->scattered_rx = 1;
1802
1803         return 0;
1804 }
1805
1806 static int
1807 i40evf_rx_init(struct rte_eth_dev *dev)
1808 {
1809         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1810         uint16_t i;
1811         int ret = I40E_SUCCESS;
1812         struct i40e_rx_queue **rxq =
1813                 (struct i40e_rx_queue **)dev->data->rx_queues;
1814
1815         i40evf_config_rss(vf);
1816         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1817                 if (!rxq[i] || !rxq[i]->q_set)
1818                         continue;
1819                 ret = i40evf_rxq_init(dev, rxq[i]);
1820                 if (ret != I40E_SUCCESS)
1821                         break;
1822         }
1823         if (ret == I40E_SUCCESS)
1824                 i40e_set_rx_function(dev);
1825
1826         return ret;
1827 }
1828
1829 static void
1830 i40evf_tx_init(struct rte_eth_dev *dev)
1831 {
1832         uint16_t i;
1833         struct i40e_tx_queue **txq =
1834                 (struct i40e_tx_queue **)dev->data->tx_queues;
1835         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1836
1837         for (i = 0; i < dev->data->nb_tx_queues; i++)
1838                 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1839
1840         i40e_set_tx_function(dev);
1841 }
1842
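/*
 * With a single (miscellaneous) vector, Rx queue interrupts are enabled
 * through VFINT_DYN_CTL01 here. When each Rx queue owns its own MSI-X
 * vector, per-queue enabling is left to i40evf_dev_rx_queue_intr_enable()
 * and only a register flush is needed.
 */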
1843 static inline void
1844 i40evf_enable_queues_intr(struct rte_eth_dev *dev)
1845 {
1846         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1847         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1848         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1849
1850         if (!rte_intr_allow_others(intr_handle)) {
1851                 I40E_WRITE_REG(hw,
1852                                I40E_VFINT_DYN_CTL01,
1853                                I40E_VFINT_DYN_CTL01_INTENA_MASK |
1854                                I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1855                                I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1856                 I40EVF_WRITE_FLUSH(hw);
1857                 return;
1858         }
1859
1860         I40EVF_WRITE_FLUSH(hw);
1861 }
1862
1863 static inline void
1864 i40evf_disable_queues_intr(struct rte_eth_dev *dev)
1865 {
1866         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1867         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1868         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1869
1870         if (!rte_intr_allow_others(intr_handle)) {
1871                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1872                                I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1873                 I40EVF_WRITE_FLUSH(hw);
1874                 return;
1875         }
1876
1877         I40EVF_WRITE_FLUSH(hw);
1878 }
1879
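/*
 * Enable the interrupt for one Rx queue: look up the MSI-X vector bound to
 * the queue in intr_handle->intr_vec and program either VFINT_DYN_CTL01
 * (misc vector) or the matching VFINT_DYN_CTLN1 register, with ITR index 0
 * and the interval from i40e_calc_itr_interval().
 */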
1880 static int
1881 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1882 {
1883         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1884         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1885         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1886         uint16_t interval =
1887                 i40e_calc_itr_interval(0, 0);
1888         uint16_t msix_intr;
1889
1890         msix_intr = intr_handle->intr_vec[queue_id];
1891         if (msix_intr == I40E_MISC_VEC_ID)
1892                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1893                                I40E_VFINT_DYN_CTL01_INTENA_MASK |
1894                                I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1895                                (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
1896                                (interval <<
1897                                 I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
1898         else
1899                 I40E_WRITE_REG(hw,
1900                                I40E_VFINT_DYN_CTLN1(msix_intr -
1901                                                     I40E_RX_VEC_START),
1902                                I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1903                                I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1904                                (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1905                                (interval <<
1906                                 I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
1907
1908         I40EVF_WRITE_FLUSH(hw);
1909
1910         return 0;
1911 }
1912
1913 static int
1914 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1915 {
1916         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1917         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1918         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1919         uint16_t msix_intr;
1920
1921         msix_intr = intr_handle->intr_vec[queue_id];
1922         if (msix_intr == I40E_MISC_VEC_ID)
1923                 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
1924         else
1925                 I40E_WRITE_REG(hw,
1926                                I40E_VFINT_DYN_CTLN1(msix_intr -
1927                                                     I40E_RX_VEC_START),
1928                                0);
1929
1930         I40EVF_WRITE_FLUSH(hw);
1931
1932         return 0;
1933 }
1934
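/*
 * Add or remove every address in dev->data->mac_addrs on the PF side.
 * The list is sent in chunks so that each virtchnl message stays within
 * the I40E_AQ_BUF_SZ admin-queue buffer limit.
 */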
1935 static void
1936 i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
1937 {
1938         struct virtchnl_ether_addr_list *list;
1939         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1940         int err, i, j;
1941         int next_begin = 0;
1942         int begin = 0;
1943         uint32_t len;
1944         struct rte_ether_addr *addr;
1945         struct vf_cmd_info args;
1946
1947         do {
1948                 j = 0;
1949                 len = sizeof(struct virtchnl_ether_addr_list);
1950                 for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
1951                         if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
1952                                 continue;
1953                         len += sizeof(struct virtchnl_ether_addr);
1954                         if (len >= I40E_AQ_BUF_SZ) {
1955                                 next_begin = i + 1;
1956                                 break;
1957                         }
1958                 }
1959
1960                 list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
1961                 if (!list) {
1962                         PMD_DRV_LOG(ERR, "failed to allocate memory");
1963                         return;
1964                 }
1965
1966                 for (i = begin; i < next_begin; i++) {
1967                         addr = &dev->data->mac_addrs[i];
1968                         if (rte_is_zero_ether_addr(addr))
1969                                 continue;
1970                         rte_memcpy(list->list[j].addr, addr->addr_bytes,
1971                                          sizeof(addr->addr_bytes));
1972                         PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
1973                                     addr->addr_bytes[0], addr->addr_bytes[1],
1974                                     addr->addr_bytes[2], addr->addr_bytes[3],
1975                                     addr->addr_bytes[4], addr->addr_bytes[5]);
1976                         j++;
1977                 }
1978                 list->vsi_id = vf->vsi_res->vsi_id;
1979                 list->num_elements = j;
1980                 args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
1981                            VIRTCHNL_OP_DEL_ETH_ADDR;
1982                 args.in_args = (uint8_t *)list;
1983                 args.in_args_size = len;
1984                 args.out_buffer = vf->aq_resp;
1985                 args.out_size = I40E_AQ_BUF_SZ;
1986                 err = i40evf_execute_vf_cmd(dev, &args);
1987                 if (err) {
1988                         PMD_DRV_LOG(ERR, "fail to execute command %s",
1989                                     add ? "OP_ADD_ETHER_ADDRESS" :
1990                                     "OP_DEL_ETHER_ADDRESS");
1991                 } else {
1992                         if (add)
1993                                 vf->vsi.mac_num++;
1994                         else
1995                                 vf->vsi.mac_num--;
1996                 }
1997                 rte_free(list);
1998                 begin = next_begin;
1999         } while (begin < I40E_NUM_MACADDR_MAX);
2000 }
2001
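/*
 * Start sequence: set up the Rx interrupt vectors if requested, initialize
 * the Rx/Tx rings, push the queue and IRQ-map configuration to the PF,
 * program the MAC/multicast filters and finally switch the queues on.
 */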
2002 static int
2003 i40evf_dev_start(struct rte_eth_dev *dev)
2004 {
2005         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2006         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2007         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2008         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2009         uint32_t intr_vector = 0;
2010
2011         PMD_INIT_FUNC_TRACE();
2012
2013         hw->adapter_stopped = 0;
2014
2015         vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
2016         vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
2017                                         dev->data->nb_tx_queues);
2018
2019         /* check and configure queue intr-vector mapping */
2020         if (rte_intr_cap_multiple(intr_handle) &&
2021             dev->data->dev_conf.intr_conf.rxq) {
2022                 intr_vector = dev->data->nb_rx_queues;
2023                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2024                         return -1;
2025         }
2026
2027         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2028                 intr_handle->intr_vec =
2029                         rte_zmalloc("intr_vec",
2030                                     dev->data->nb_rx_queues * sizeof(int), 0);
2031                 if (!intr_handle->intr_vec) {
2032                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2033                                      " intr_vec", dev->data->nb_rx_queues);
2034                         return -ENOMEM;
2035                 }
2036         }
2037
2038         if (i40evf_rx_init(dev) != 0) {
2039                 PMD_DRV_LOG(ERR, "failed to do RX init");
2040                 return -1;
2041         }
2042
2043         i40evf_tx_init(dev);
2044
2045         if (i40evf_configure_vsi_queues(dev) != 0) {
2046                 PMD_DRV_LOG(ERR, "configure queues failed");
2047                 goto err_queue;
2048         }
2049         if (i40evf_config_irq_map(dev)) {
2050                 PMD_DRV_LOG(ERR, "config_irq_map failed");
2051                 goto err_queue;
2052         }
2053
2054         /* Set all mac addrs */
2055         i40evf_add_del_all_mac_addr(dev, TRUE);
2056         /* Set all multicast addresses */
2057         i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
2058                                 TRUE);
2059
2060         if (i40evf_start_queues(dev) != 0) {
2061                 PMD_DRV_LOG(ERR, "enable queues failed");
2062                 goto err_mac;
2063         }
2064
2065         /* only enable interrupt in rx interrupt mode */
2066         if (dev->data->dev_conf.intr_conf.rxq != 0)
2067                 rte_intr_enable(intr_handle);
2068
2069         i40evf_enable_queues_intr(dev);
2070
2071         return 0;
2072
2073 err_mac:
2074         i40evf_add_del_all_mac_addr(dev, FALSE);
2075         i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
2076                                 FALSE);
2077 err_queue:
2078         return -1;
2079 }
2080
2081 static void
2082 i40evf_dev_stop(struct rte_eth_dev *dev)
2083 {
2084         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2085         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2086         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2087         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2088
2089         PMD_INIT_FUNC_TRACE();
2090
2091         if (dev->data->dev_conf.intr_conf.rxq != 0)
2092                 rte_intr_disable(intr_handle);
2093
2094         if (hw->adapter_stopped == 1)
2095                 return;
2096         i40evf_stop_queues(dev);
2097         i40evf_disable_queues_intr(dev);
2098         i40e_dev_clear_queues(dev);
2099
2100         /* Clean datapath event and queue/vec mapping */
2101         rte_intr_efd_disable(intr_handle);
2102         if (intr_handle->intr_vec) {
2103                 rte_free(intr_handle->intr_vec);
2104                 intr_handle->intr_vec = NULL;
2105         }
2106         /* remove all mac addrs */
2107         i40evf_add_del_all_mac_addr(dev, FALSE);
2108         /* remove all multicast addresses */
2109         i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
2110                                 FALSE);
2111         hw->adapter_stopped = 1;
2113 }
2114
2115 static int
2116 i40evf_dev_link_update(struct rte_eth_dev *dev,
2117                        __rte_unused int wait_to_complete)
2118 {
2119         struct rte_eth_link new_link;
2120         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2121         /*
2122          * The DPDK PF host provides an interface to acquire the link
2123          * status, while the Linux kernel PF driver does not.
2124          */
2125
2126         memset(&new_link, 0, sizeof(new_link));
2127         /* Map the link speed reported by the PF host */
2128         switch (vf->link_speed) {
2129         case I40E_LINK_SPEED_100MB:
2130                 new_link.link_speed = ETH_SPEED_NUM_100M;
2131                 break;
2132         case I40E_LINK_SPEED_1GB:
2133                 new_link.link_speed = ETH_SPEED_NUM_1G;
2134                 break;
2135         case I40E_LINK_SPEED_10GB:
2136                 new_link.link_speed = ETH_SPEED_NUM_10G;
2137                 break;
2138         case I40E_LINK_SPEED_20GB:
2139                 new_link.link_speed = ETH_SPEED_NUM_20G;
2140                 break;
2141         case I40E_LINK_SPEED_25GB:
2142                 new_link.link_speed = ETH_SPEED_NUM_25G;
2143                 break;
2144         case I40E_LINK_SPEED_40GB:
2145                 new_link.link_speed = ETH_SPEED_NUM_40G;
2146                 break;
2147         default:
2148                 new_link.link_speed = ETH_SPEED_NUM_NONE;
2149                 break;
2150         }
2151         /* full duplex only */
2152         new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
2153         new_link.link_status = vf->link_up &&
2154                                 new_link.link_speed != ETH_SPEED_NUM_NONE
2155                                 ? ETH_LINK_UP
2156                                 : ETH_LINK_DOWN;
2157         new_link.link_autoneg =
2158                 !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
2159
2160         return rte_eth_linkstatus_set(dev, &new_link);
2161 }
2162
2163 static int
2164 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
2165 {
2166         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2167         int ret;
2168
2169         ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
2170         if (ret == 0)
2171                 vf->promisc_unicast_enabled = TRUE;
2172         else
2173                 ret = -EAGAIN;
2174
2175         return ret;
2176 }
2177
2178 static int
2179 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
2180 {
2181         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2182         int ret;
2183
2184         ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
2185         if (ret == 0)
2186                 vf->promisc_unicast_enabled = FALSE;
2187         else
2188                 ret = -EAGAIN;
2189
2190         return ret;
2191 }
2192
2193 static int
2194 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
2195 {
2196         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2197         int ret;
2198
2199         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
2200         if (ret == 0)
2201                 vf->promisc_multicast_enabled = TRUE;
2202         else
2203                 ret = -EAGAIN;
2204
2205         return ret;
2206 }
2207
2208 static int
2209 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
2210 {
2211         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2212         int ret;
2213
2214         ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
2215         if (ret == 0)
2216                 vf->promisc_multicast_enabled = FALSE;
2217         else
2218                 ret = -EAGAIN;
2219
2220         return ret;
2221 }
2222
2223 static int
2224 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2225 {
2226         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2227
2228         dev_info->max_rx_queues = I40E_MAX_QP_NUM_PER_VF;
2229         dev_info->max_tx_queues = I40E_MAX_QP_NUM_PER_VF;
2230         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2231         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2232         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
2233         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2234         dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2235         dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
2236         dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
2237         dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
2238         dev_info->rx_queue_offload_capa = 0;
2239         dev_info->rx_offload_capa =
2240                 DEV_RX_OFFLOAD_VLAN_STRIP |
2241                 DEV_RX_OFFLOAD_QINQ_STRIP |
2242                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2243                 DEV_RX_OFFLOAD_UDP_CKSUM |
2244                 DEV_RX_OFFLOAD_TCP_CKSUM |
2245                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2246                 DEV_RX_OFFLOAD_SCATTER |
2247                 DEV_RX_OFFLOAD_JUMBO_FRAME |
2248                 DEV_RX_OFFLOAD_VLAN_FILTER;
2249
2250         dev_info->tx_queue_offload_capa = 0;
2251         dev_info->tx_offload_capa =
2252                 DEV_TX_OFFLOAD_VLAN_INSERT |
2253                 DEV_TX_OFFLOAD_QINQ_INSERT |
2254                 DEV_TX_OFFLOAD_IPV4_CKSUM |
2255                 DEV_TX_OFFLOAD_UDP_CKSUM |
2256                 DEV_TX_OFFLOAD_TCP_CKSUM |
2257                 DEV_TX_OFFLOAD_SCTP_CKSUM |
2258                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2259                 DEV_TX_OFFLOAD_TCP_TSO |
2260                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
2261                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
2262                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
2263                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
2264                 DEV_TX_OFFLOAD_MULTI_SEGS;
2265
2266         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2267                 .rx_thresh = {
2268                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
2269                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
2270                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
2271                 },
2272                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2273                 .rx_drop_en = 0,
2274                 .offloads = 0,
2275         };
2276
2277         dev_info->default_txconf = (struct rte_eth_txconf) {
2278                 .tx_thresh = {
2279                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
2280                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
2281                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
2282                 },
2283                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2284                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2285                 .offloads = 0,
2286         };
2287
2288         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2289                 .nb_max = I40E_MAX_RING_DESC,
2290                 .nb_min = I40E_MIN_RING_DESC,
2291                 .nb_align = I40E_ALIGN_RING_DESC,
2292         };
2293
2294         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2295                 .nb_max = I40E_MAX_RING_DESC,
2296                 .nb_min = I40E_MIN_RING_DESC,
2297                 .nb_align = I40E_ALIGN_RING_DESC,
2298         };
2299
2300         return 0;
2301 }
2302
2303 static int
2304 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2305 {
2306         int ret;
2307         struct i40e_eth_stats *pstats = NULL;
2308         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2309         struct i40e_vsi *vsi = &vf->vsi;
2310
2311         ret = i40evf_query_stats(dev, &pstats);
2312         if (ret == 0) {
2313                 i40evf_update_stats(vsi, pstats);
2314
2315                 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
2316                                                 pstats->rx_broadcast;
2317                 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
2318                                                 pstats->tx_unicast;
2319                 stats->imissed = pstats->rx_discards;
2320                 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
2321                 stats->ibytes = pstats->rx_bytes;
2322                 stats->obytes = pstats->tx_bytes;
2323         } else {
2324                 PMD_DRV_LOG(ERR, "Get statistics failed");
2325         }
2326         return ret;
2327 }
2328
2329 static void
2330 i40evf_dev_close(struct rte_eth_dev *dev)
2331 {
2332         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2333         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2334
2335         i40evf_dev_stop(dev);
2336         i40e_dev_free_queues(dev);
2337         /*
2338          * Disable promiscuous mode before resetting the VF. This is a
2339          * workaround when working with the kernel PF driver and is not
2340          * the normal flow.
2341          */
2342         i40evf_dev_promiscuous_disable(dev);
2343         i40evf_dev_allmulticast_disable(dev);
2344         rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
2345
2346         i40evf_reset_vf(dev);
2347         i40e_shutdown_adminq(hw);
2348         i40evf_disable_irq0(hw);
2349
2350         dev->dev_ops = NULL;
2351         dev->rx_pkt_burst = NULL;
2352         dev->tx_pkt_burst = NULL;
2353
2354         rte_free(vf->vf_res);
2355         vf->vf_res = NULL;
2356         rte_free(vf->aq_resp);
2357         vf->aq_resp = NULL;
2358
2359         hw->adapter_closed = 1;
2360 }
2361
2362 /*
2363  * Reset VF device only to re-initialize resources in PMD layer
2364  */
2365 static int
2366 i40evf_dev_reset(struct rte_eth_dev *dev)
2367 {
2368         int ret;
2369
2370         ret = i40evf_dev_uninit(dev);
2371         if (ret)
2372                 return ret;
2373
2374         ret = i40evf_dev_init(dev);
2375
2376         return ret;
2377 }
2378
2379 static int
2380 i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2381 {
2382         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2383         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2384         int ret;
2385
2386         if (!lut)
2387                 return -EINVAL;
2388
2389         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2390                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
2391                                           lut, lut_size);
2392                 if (ret) {
2393                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2394                         return ret;
2395                 }
2396         } else {
2397                 uint32_t *lut_dw = (uint32_t *)lut;
2398                 uint16_t i, lut_size_dw = lut_size / 4;
2399
2400                 for (i = 0; i < lut_size_dw; i++)
2401                         lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
2402         }
2403
2404         return 0;
2405 }
2406
2407 static int
2408 i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2409 {
2410         struct i40e_vf *vf;
2411         struct i40e_hw *hw;
2412         int ret;
2413
2414         if (!vsi || !lut)
2415                 return -EINVAL;
2416
2417         vf = I40E_VSI_TO_VF(vsi);
2418         hw = I40E_VSI_TO_HW(vsi);
2419
2420         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2421                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
2422                                           lut, lut_size);
2423                 if (ret) {
2424                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2425                         return ret;
2426                 }
2427         } else {
2428                 uint32_t *lut_dw = (uint32_t *)lut;
2429                 uint16_t i, lut_size_dw = lut_size / 4;
2430
2431                 for (i = 0; i < lut_size_dw; i++)
2432                         I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
2433                 I40EVF_WRITE_FLUSH(hw);
2434         }
2435
2436         return 0;
2437 }
2438
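/*
 * RETA update is a read-modify-write: fetch the current 64-entry lookup
 * table, overwrite only the entries selected by reta_conf[].mask and
 * write the table back.
 */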
2439 static int
2440 i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
2441                            struct rte_eth_rss_reta_entry64 *reta_conf,
2442                            uint16_t reta_size)
2443 {
2444         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2445         uint8_t *lut;
2446         uint16_t i, idx, shift;
2447         int ret;
2448
2449         if (reta_size != ETH_RSS_RETA_SIZE_64) {
2450                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
2451                         "table (%d) doesn't match what the hardware can "
2452                         "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2453                 return -EINVAL;
2454         }
2455
2456         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2457         if (!lut) {
2458                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2459                 return -ENOMEM;
2460         }
2461         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2462         if (ret)
2463                 goto out;
2464         for (i = 0; i < reta_size; i++) {
2465                 idx = i / RTE_RETA_GROUP_SIZE;
2466                 shift = i % RTE_RETA_GROUP_SIZE;
2467                 if (reta_conf[idx].mask & (1ULL << shift))
2468                         lut[i] = reta_conf[idx].reta[shift];
2469         }
2470         ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
2471
2472 out:
2473         rte_free(lut);
2474
2475         return ret;
2476 }
2477
2478 static int
2479 i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
2480                           struct rte_eth_rss_reta_entry64 *reta_conf,
2481                           uint16_t reta_size)
2482 {
2483         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2484         uint16_t i, idx, shift;
2485         uint8_t *lut;
2486         int ret;
2487
2488         if (reta_size != ETH_RSS_RETA_SIZE_64) {
2489                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
2490                         "table (%d) doesn't match what the hardware can "
2491                         "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2492                 return -EINVAL;
2493         }
2494
2495         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2496         if (!lut) {
2497                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2498                 return -ENOMEM;
2499         }
2500
2501         ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2502         if (ret)
2503                 goto out;
2504         for (i = 0; i < reta_size; i++) {
2505                 idx = i / RTE_RETA_GROUP_SIZE;
2506                 shift = i % RTE_RETA_GROUP_SIZE;
2507                 if (reta_conf[idx].mask & (1ULL << shift))
2508                         reta_conf[idx].reta[shift] = lut[i];
2509         }
2510
2511 out:
2512         rte_free(lut);
2513
2514         return ret;
2515 }
2516
2517 static int
2518 i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
2519 {
2520         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2521         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2522         int ret = 0;
2523
2524         if (!key || key_len == 0) {
2525                 PMD_DRV_LOG(DEBUG, "No key to be configured");
2526                 return 0;
2527         } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
2528                 sizeof(uint32_t)) {
2529                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2530                 return -EINVAL;
2531         }
2532
2533         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2534                 struct i40e_aqc_get_set_rss_key_data *key_dw =
2535                         (struct i40e_aqc_get_set_rss_key_data *)key;
2536
2537                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
2538                 if (ret)
2539                         PMD_INIT_LOG(ERR, "Failed to configure RSS key "
2540                                      "via AQ");
2541         } else {
2542                 uint32_t *hash_key = (uint32_t *)key;
2543                 uint16_t i;
2544
2545                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2546                         i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
2547                 I40EVF_WRITE_FLUSH(hw);
2548         }
2549
2550         return ret;
2551 }
2552
2553 static int
2554 i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
2555 {
2556         struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2557         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2558         int ret;
2559
2560         if (!key || !key_len)
2561                 return -EINVAL;
2562
2563         if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2564                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
2565                         (struct i40e_aqc_get_set_rss_key_data *)key);
2566                 if (ret) {
2567                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
2568                         return ret;
2569                 }
2570         } else {
2571                 uint32_t *key_dw = (uint32_t *)key;
2572                 uint16_t i;
2573
2574                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2575                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
2576         }
2577         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2578
2579         return 0;
2580 }
2581
2582 static int
2583 i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
2584 {
2585         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2586         uint64_t hena;
2587         int ret;
2588
2589         ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
2590                                  rss_conf->rss_key_len);
2591         if (ret)
2592                 return ret;
2593
2594         hena = i40e_config_hena(vf->adapter, rss_conf->rss_hf);
2595         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2596         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2597         I40EVF_WRITE_FLUSH(hw);
2598
2599         return 0;
2600 }
2601
2602 static void
2603 i40evf_disable_rss(struct i40e_vf *vf)
2604 {
2605         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2606
2607         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), 0);
2608         i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), 0);
2609         I40EVF_WRITE_FLUSH(hw);
2610 }
2611
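/*
 * Configure RSS for the VF. Without AQ RSS support the lookup table is
 * filled round-robin over the enabled Rx queues directly in VFQF_HLUT;
 * otherwise it is pushed through i40evf_set_rss_lut(). If the application
 * supplies no (or too short a) hash key, a random default key is used.
 */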
2612 static int
2613 i40evf_config_rss(struct i40e_vf *vf)
2614 {
2615         struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2616         struct rte_eth_rss_conf rss_conf;
2617         uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
2618         uint32_t rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;
2619         uint16_t num;
2620         uint8_t *lut_info;
2621         int ret;
2622
2623         if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
2624                 i40evf_disable_rss(vf);
2625                 PMD_DRV_LOG(DEBUG, "RSS not configured");
2626                 return 0;
2627         }
2628
2629         num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
2630         /* Fill out the lookup table */
2631         if (!(vf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) {
2632                 for (i = 0, j = 0; i < nb_q; i++, j++) {
2633                         if (j >= num)
2634                                 j = 0;
2635                         lut = (lut << 8) | j;
2636                         if ((i & 3) == 3)
2637                                 I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
2638                 }
2639         } else {
2640                 lut_info = rte_zmalloc("i40e_rss_lut", rss_lut_size, 0);
2641                 if (!lut_info) {
2642                         PMD_DRV_LOG(ERR, "Failed to allocate memory");
2643                         return -ENOMEM;
2644                 }
2645
2646                 for (i = 0; i < rss_lut_size; i++)
2647                         lut_info[i] = i % vf->num_queue_pairs;
2648
2649                 ret = i40evf_set_rss_lut(&vf->vsi, lut_info,
2650                                          rss_lut_size);
2651                 rte_free(lut_info);
2652                 if (ret)
2653                         return ret;
2654         }
2655
2656         rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
2657         if ((rss_conf.rss_hf & vf->adapter->flow_types_mask) == 0) {
2658                 i40evf_disable_rss(vf);
2659                 PMD_DRV_LOG(DEBUG, "No hash flag is set");
2660                 return 0;
2661         }
2662
2663         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
2664                 (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
2665                 /* Calculate the default hash key */
2666                 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2667                         rss_key_default[i] = (uint32_t)rte_rand();
2668                 rss_conf.rss_key = (uint8_t *)rss_key_default;
2669                 rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
2670                         sizeof(uint32_t);
2671         }
2672
2673         return i40evf_hw_rss_hash_set(vf, &rss_conf);
2674 }
2675
2676 static int
2677 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
2678                            struct rte_eth_rss_conf *rss_conf)
2679 {
2680         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2681         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2682         uint64_t rss_hf = rss_conf->rss_hf & vf->adapter->flow_types_mask;
2683         uint64_t hena;
2684
2685         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2686         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2687
2688         if (!(hena & vf->adapter->pctypes_mask)) { /* RSS disabled */
2689                 if (rss_hf != 0) /* Enable RSS */
2690                         return -EINVAL;
2691                 return 0;
2692         }
2693
2694         /* RSS enabled */
2695         if (rss_hf == 0) /* Disable RSS */
2696                 return -EINVAL;
2697
2698         return i40evf_hw_rss_hash_set(vf, rss_conf);
2699 }
2700
2701 static int
2702 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2703                              struct rte_eth_rss_conf *rss_conf)
2704 {
2705         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2706         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2707         uint64_t hena;
2708
2709         i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
2710                            &rss_conf->rss_key_len);
2711
2712         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2713         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2714         rss_conf->rss_hf = i40e_parse_hena(vf->adapter, hena);
2715
2716         return 0;
2717 }
2718
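/*
 * MTU changes are only accepted while the port is stopped; the jumbo
 * frame offload flag is toggled to match the resulting frame size.
 */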
2719 static int
2720 i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2721 {
2722         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2723         struct rte_eth_dev_data *dev_data = vf->dev_data;
2724         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
2725         int ret = 0;
2726
2727         /* check if mtu is within the allowed range */
2728         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
2729                 return -EINVAL;
2730
2731         /* MTU setting is forbidden while the port is started */
2732         if (dev_data->dev_started) {
2733                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
2734                             dev_data->port_id);
2735                 return -EBUSY;
2736         }
2737
2738         if (frame_size > RTE_ETHER_MAX_LEN)
2739                 dev_data->dev_conf.rxmode.offloads |=
2740                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2741         else
2742                 dev_data->dev_conf.rxmode.offloads &=
2743                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2744         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2745
2746         return ret;
2747 }
2748
2749 static int
2750 i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
2751                             struct rte_ether_addr *mac_addr)
2752 {
2753         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2754         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2755
2756         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
2757                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2758                 return -EINVAL;
2759         }
2760
2761         if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
2762                 return -EPERM;
2763
2764         i40evf_del_mac_addr_by_addr(dev, (struct rte_ether_addr *)hw->mac.addr);
2765
2766         if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
2767                 return -EIO;
2768
2769         rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
2770         return 0;
2771 }
2772
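/*
 * Program up to I40E_NUM_MACADDR_MAX multicast addresses in a single
 * virtchnl message; every entry is validated to be a multicast address
 * before the list is sent.
 */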
2773 static int
2774 i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
2775                         struct rte_ether_addr *mc_addrs,
2776                         uint32_t mc_addrs_num, bool add)
2777 {
2778         struct virtchnl_ether_addr_list *list;
2779         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2780         uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
2781                 (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
2782         uint32_t i;
2783         int err;
2784         struct vf_cmd_info args;
2785
2786         if (mc_addrs == NULL || mc_addrs_num == 0)
2787                 return 0;
2788
2789         if (mc_addrs_num > I40E_NUM_MACADDR_MAX)
2790                 return -EINVAL;
2791
2792         list = (struct virtchnl_ether_addr_list *)cmd_buffer;
2793         list->vsi_id = vf->vsi_res->vsi_id;
2794         list->num_elements = mc_addrs_num;
2795
2796         for (i = 0; i < mc_addrs_num; i++) {
2797                 if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
2798                         PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
2799                                     mc_addrs[i].addr_bytes[0],
2800                                     mc_addrs[i].addr_bytes[1],
2801                                     mc_addrs[i].addr_bytes[2],
2802                                     mc_addrs[i].addr_bytes[3],
2803                                     mc_addrs[i].addr_bytes[4],
2804                                     mc_addrs[i].addr_bytes[5]);
2805                         return -EINVAL;
2806                 }
2807
2808                 memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
2809                         sizeof(list->list[i].addr));
2810         }
2811
2812         args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
2813         args.in_args = cmd_buffer;
2814         args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
2815                 i * sizeof(struct virtchnl_ether_addr);
2816         args.out_buffer = vf->aq_resp;
2817         args.out_size = I40E_AQ_BUF_SZ;
2818         err = i40evf_execute_vf_cmd(dev, &args);
2819         if (err) {
2820                 PMD_DRV_LOG(ERR, "fail to execute command %s",
2821                         add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
2822                 return err;
2823         }
2824
2825         return 0;
2826 }
2827
2828 static int
2829 i40evf_set_mc_addr_list(struct rte_eth_dev *dev,
2830                         struct rte_ether_addr *mc_addrs,
2831                         uint32_t mc_addrs_num)
2832 {
2833         struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2834         int err;
2835
2836         /* flush previous addresses */
2837         err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
2838                                 FALSE);
2839         if (err)
2840                 return err;
2841
2842         vf->mc_addrs_num = 0;
2843
2844         /* add new ones */
2845         err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num,
2846                                         TRUE);
2847         if (err)
2848                 return err;
2849
2850         vf->mc_addrs_num = mc_addrs_num;
2851         memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
2852
2853         return 0;
2854 }
2855
2856 bool
2857 is_i40evf_supported(struct rte_eth_dev *dev)
2858 {
2859         return is_device_supported(dev, &rte_i40evf_pmd);
2860 }