4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
46 #include <rte_interrupts.h>
48 #include <rte_debug.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_tailq.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
63 #include "i40e_logs.h"
64 #include "i40e/i40e_prototype.h"
65 #include "i40e/i40e_adminq_cmd.h"
66 #include "i40e/i40e_type.h"
68 #include "i40e_rxtx.h"
69 #include "i40e_ethdev.h"
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR 1
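/*
 * Queue interrupts are mapped to MSI-X vector 1 by default (see
 * i40evf_config_irq_map() below); vector 0 appears to be left for admin
 * queue / mailbox events on this device family.
 */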
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT 20
78 struct i40evf_arq_msg_info {
79 enum i40e_virtchnl_ops ops;
80 enum i40e_status_code result;
87 enum i40e_virtchnl_ops ops;
89 uint32_t in_args_size;
91 /* Input & output type: pass in the buffer size and get back the
92 * actual result
97 enum i40evf_aq_result {
98 I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
99 I40EVF_MSG_NON, /* Read nothing from admin queue */
100 I40EVF_MSG_SYS, /* Read system msg from admin queue */
101 I40EVF_MSG_CMD, /* Read async command result */
104 /* A shared buffer to store the command result from the PF driver */
105 static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
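/*
 * Every synchronous virtchnl command in this file points
 * vf_cmd_info.out_buffer at this single buffer, so a reply is expected to
 * be consumed or copied out before the next command is issued.
 */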
107 static int i40evf_dev_configure(struct rte_eth_dev *dev);
108 static int i40evf_dev_start(struct rte_eth_dev *dev);
109 static void i40evf_dev_stop(struct rte_eth_dev *dev);
110 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
111 struct rte_eth_dev_info *dev_info);
112 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
113 __rte_unused int wait_to_complete);
114 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
115 struct rte_eth_stats *stats);
116 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
117 uint16_t vlan_id, int on);
118 static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
119 static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
121 static void i40evf_dev_close(struct rte_eth_dev *dev);
122 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
123 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
124 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
125 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
126 static int i40evf_get_link_status(struct rte_eth_dev *dev,
127 struct rte_eth_link *link);
128 static int i40evf_init_vlan(struct rte_eth_dev *dev);
129 static int i40evf_config_rss(struct i40e_vf *vf);
130 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
131 struct rte_eth_rss_conf *rss_conf);
132 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
133 struct rte_eth_rss_conf *rss_conf);
134 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *, uint16_t);
135 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *, uint16_t);
136 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *, uint16_t);
137 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *, uint16_t);
139 /* Default hash key buffer for RSS */
140 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
142 static struct eth_dev_ops i40evf_eth_dev_ops = {
143 .dev_configure = i40evf_dev_configure,
144 .dev_start = i40evf_dev_start,
145 .dev_stop = i40evf_dev_stop,
146 .promiscuous_enable = i40evf_dev_promiscuous_enable,
147 .promiscuous_disable = i40evf_dev_promiscuous_disable,
148 .allmulticast_enable = i40evf_dev_allmulticast_enable,
149 .allmulticast_disable = i40evf_dev_allmulticast_disable,
150 .link_update = i40evf_dev_link_update,
151 .stats_get = i40evf_dev_stats_get,
152 .dev_close = i40evf_dev_close,
153 .dev_infos_get = i40evf_dev_info_get,
154 .vlan_filter_set = i40evf_vlan_filter_set,
155 .vlan_offload_set = i40evf_vlan_offload_set,
156 .vlan_pvid_set = i40evf_vlan_pvid_set,
157 .rx_queue_start = i40evf_dev_rx_queue_start,
158 .rx_queue_stop = i40evf_dev_rx_queue_stop,
159 .tx_queue_start = i40evf_dev_tx_queue_start,
160 .tx_queue_stop = i40evf_dev_tx_queue_stop,
161 .rx_queue_setup = i40e_dev_rx_queue_setup,
162 .rx_queue_release = i40e_dev_rx_queue_release,
163 .tx_queue_setup = i40e_dev_tx_queue_setup,
164 .tx_queue_release = i40e_dev_tx_queue_release,
165 .rss_hash_update = i40evf_dev_rss_hash_update,
166 .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
170 i40evf_set_mac_type(struct i40e_hw *hw)
172 int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
174 if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
175 switch (hw->device_id) {
177 case I40E_DEV_ID_VF_HV:
178 hw->mac.type = I40E_MAC_VF;
179 status = I40E_SUCCESS;
190 * Parse admin queue message.
195 * > 0: read cmd result
197 static enum i40evf_aq_result
198 i40evf_parse_pfmsg(struct i40e_vf *vf,
199 struct i40e_arq_event_info *event,
200 struct i40evf_arq_msg_info *data)
202 enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
203 rte_le_to_cpu_32(event->desc.cookie_high);
204 enum i40e_status_code retval = (enum i40e_status_code)\
205 rte_le_to_cpu_32(event->desc.cookie_low);
206 enum i40evf_aq_result ret = I40EVF_MSG_CMD;
209 if (opcode == I40E_VIRTCHNL_OP_EVENT) {
210 struct i40e_virtchnl_pf_event *vpe =
211 (struct i40e_virtchnl_pf_event *)event->msg_buf;
213 /* Initialize ret to sys event */
214 ret = I40EVF_MSG_SYS;
215 switch (vpe->event) {
216 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
218 vpe->event_data.link_event.link_status;
219 vf->pend_msg |= PFMSG_LINK_CHANGE;
220 PMD_DRV_LOG(INFO, "Link status update:%s",
221 vf->link_up ? "up" : "down");
223 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
225 vf->pend_msg |= PFMSG_RESET_IMPENDING;
226 PMD_DRV_LOG(INFO, "VF is resetting");
228 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
229 vf->dev_closed = true;
230 vf->pend_msg |= PFMSG_DRIVER_CLOSE;
231 PMD_DRV_LOG(INFO, "PF driver closed");
234 PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
235 __func__, vpe->event);
238 /* asynchronous reply to a command previously issued by the VF */
239 ret = I40EVF_MSG_CMD;
240 /* Actual data length read from PF */
241 data->msg_len = event->msg_len;
243 /* fill the ops and result to notify VF */
244 data->result = retval;
251 * Read data from the admin queue to get a message from the PF driver
253 static enum i40evf_aq_result
254 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
256 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
257 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
258 struct i40e_arq_event_info event;
260 enum i40evf_aq_result result = I40EVF_MSG_NON;
262 event.buf_len = data->buf_len;
263 event.msg_buf = data->msg;
264 ret = i40e_clean_arq_element(hw, &event, NULL);
265 /* Can't read any msg from adminQ */
267 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
268 result = I40EVF_MSG_NON;
270 result = I40EVF_MSG_ERR;
274 /* Parse the event */
275 result = i40evf_parse_pfmsg(vf, &event, data);
281 * Poll the admin queue until a command result is returned from the PF driver or an error occurs.
284 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
285 struct i40evf_arq_msg_info *data)
288 enum i40evf_aq_result ret;
290 #define MAX_TRY_TIMES 10
291 #define ASQ_DELAY_MS 50
293 /* Delay some time first */
294 rte_delay_ms(ASQ_DELAY_MS);
295 ret = i40evf_read_pfmsg(dev, data);
296 if (ret == I40EVF_MSG_CMD)
298 else if (ret == I40EVF_MSG_ERR)
301 /* If no message was read, or a system event was read, keep polling */
302 } while (i++ < MAX_TRY_TIMES);
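/*
 * With MAX_TRY_TIMES (10) iterations and ASQ_DELAY_MS (50) ms of delay per
 * iteration, this polls the admin queue for at most roughly 500 ms before
 * giving up.
 */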
308 * Clear the current command. Only call this after _atomic_set_cmd()
309 * has completed successfully.
312 _clear_cmd(struct i40e_vf *vf)
315 vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
319 * Check whether a command is already pending. If not, set the new command.
322 _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
324 int ret = rte_atomic32_cmpset(&vf->pend_cmd,
325 I40E_VIRTCHNL_OP_UNKNOWN, ops);
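/*
 * rte_atomic32_cmpset() returns non-zero only if pend_cmd was still
 * I40E_VIRTCHNL_OP_UNKNOWN and has been swapped to the new ops, i.e. no
 * other command is currently outstanding.
 */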
328 PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
334 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
336 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
337 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
339 struct i40evf_arq_msg_info info;
341 if (_atomic_set_cmd(vf, args->ops))
344 info.msg = args->out_buffer;
345 info.buf_len = args->out_size;
346 info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
347 info.result = I40E_SUCCESS;
349 err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
350 args->in_args, args->in_args_size, NULL);
352 PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
356 err = i40evf_wait_cmd_done(dev, &info);
357 /* read the message and check that it is the expected one */
358 if (!err && args->ops == info.ops)
361 PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
362 else if (args->ops != info.ops)
363 PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
364 args->ops, info.ops);
366 return (err | info.result);
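/*
 * Callers of i40evf_execute_vf_cmd() follow a common pattern: fill a
 * struct vf_cmd_info with the virtchnl ops, in_args/in_args_size and the
 * shared cmd_result_buffer as out_buffer, then check the combined return
 * value. See i40evf_check_api_version() below for a representative example.
 */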
370 * Check the API version, waiting synchronously until the version is read from the admin queue or an error occurs
373 i40evf_check_api_version(struct rte_eth_dev *dev)
375 struct i40e_virtchnl_version_info version, *pver;
377 struct vf_cmd_info args;
378 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
380 version.major = I40E_VIRTCHNL_VERSION_MAJOR;
381 version.minor = I40E_VIRTCHNL_VERSION_MINOR;
383 args.ops = I40E_VIRTCHNL_OP_VERSION;
384 args.in_args = (uint8_t *)&version;
385 args.in_args_size = sizeof(version);
386 args.out_buffer = cmd_result_buffer;
387 args.out_size = I40E_AQ_BUF_SZ;
389 err = i40evf_execute_vf_cmd(dev, &args);
391 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
395 pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
396 vf->version_major = pver->major;
397 vf->version_minor = pver->minor;
398 if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
399 PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
400 else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
401 (vf->version_minor == I40E_VIRTCHNL_VERSION_MINOR))
402 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
404 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
405 vf->version_major, vf->version_minor,
406 I40E_VIRTCHNL_VERSION_MAJOR,
407 I40E_VIRTCHNL_VERSION_MINOR);
415 i40evf_get_vf_resource(struct rte_eth_dev *dev)
417 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
418 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
420 struct vf_cmd_info args;
423 args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
425 args.in_args_size = 0;
426 args.out_buffer = cmd_result_buffer;
427 args.out_size = I40E_AQ_BUF_SZ;
429 err = i40evf_execute_vf_cmd(dev, &args);
432 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
436 len = sizeof(struct i40e_virtchnl_vf_resource) +
437 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
439 (void)rte_memcpy(vf->vf_res, args.out_buffer,
440 RTE_MIN(args.out_size, len));
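/*
 * The reply may describe up to I40E_MAX_VF_VSI VSIs, which is why 'len' is
 * sized as the resource header plus that many VSI entries before copying
 * from the shared result buffer.
 */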
441 i40e_vf_parse_hw_config(hw, vf->vf_res);
447 i40evf_config_promisc(struct rte_eth_dev *dev,
449 bool enable_multicast)
451 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
453 struct vf_cmd_info args;
454 struct i40e_virtchnl_promisc_info promisc;
457 promisc.vsi_id = vf->vsi_res->vsi_id;
460 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
462 if (enable_multicast)
463 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
465 args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
466 args.in_args = (uint8_t *)&promisc;
467 args.in_args_size = sizeof(promisc);
468 args.out_buffer = cmd_result_buffer;
469 args.out_size = I40E_AQ_BUF_SZ;
471 err = i40evf_execute_vf_cmd(dev, &args);
474 PMD_DRV_LOG(ERR, "fail to execute command "
475 "CONFIG_PROMISCUOUS_MODE");
479 /* Configure VLAN stripping offload. The flag specifies whether stripping is enabled. */
481 i40evf_config_vlan_offload(struct rte_eth_dev *dev,
482 bool enable_vlan_strip)
484 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
486 struct vf_cmd_info args;
487 struct i40e_virtchnl_vlan_offload_info offload;
489 offload.vsi_id = vf->vsi_res->vsi_id;
490 offload.enable_vlan_strip = enable_vlan_strip;
492 args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
493 args.in_args = (uint8_t *)&offload;
494 args.in_args_size = sizeof(offload);
495 args.out_buffer = cmd_result_buffer;
496 args.out_size = I40E_AQ_BUF_SZ;
498 err = i40evf_execute_vf_cmd(dev, &args);
500 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
506 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
507 struct i40e_vsi_vlan_pvid_info *info)
509 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
511 struct vf_cmd_info args;
512 struct i40e_virtchnl_pvid_info tpid_info;
514 if (dev == NULL || info == NULL) {
515 PMD_DRV_LOG(ERR, "invalid parameters");
516 return I40E_ERR_PARAM;
519 memset(&tpid_info, 0, sizeof(tpid_info));
520 tpid_info.vsi_id = vf->vsi_res->vsi_id;
521 (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
523 args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
524 args.in_args = (uint8_t *)&tpid_info;
525 args.in_args_size = sizeof(tpid_info);
526 args.out_buffer = cmd_result_buffer;
527 args.out_size = I40E_AQ_BUF_SZ;
529 err = i40evf_execute_vf_cmd(dev, &args);
531 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
537 i40evf_configure_queues(struct rte_eth_dev *dev)
539 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
540 struct i40e_virtchnl_vsi_queue_config_info *queue_info;
541 struct i40e_virtchnl_queue_pair_info *queue_cfg;
542 struct i40e_rx_queue **rxq =
543 (struct i40e_rx_queue **)dev->data->rx_queues;
544 struct i40e_tx_queue **txq =
545 (struct i40e_tx_queue **)dev->data->tx_queues;
546 int i, len, nb_qpairs, num_rxq, num_txq;
548 struct vf_cmd_info args;
549 struct rte_pktmbuf_pool_private *mbp_priv;
551 nb_qpairs = vf->num_queue_pairs;
552 len = sizeof(*queue_info) + sizeof(*queue_cfg) * nb_qpairs;
553 queue_info = rte_zmalloc("queue_info", len, 0);
554 if (queue_info == NULL) {
555 PMD_INIT_LOG(ERR, "failed to allocate memory for queue_info");
558 queue_info->vsi_id = vf->vsi_res->vsi_id;
559 queue_info->num_queue_pairs = nb_qpairs;
560 queue_cfg = queue_info->qpair;
562 num_rxq = dev->data->nb_rx_queues;
563 num_txq = dev->data->nb_tx_queues;
565 * The PF host driver requires queues to be configured in pairs, which
566 * means rxq_num should equal txq_num. Actual usage won't always work
567 * that way, so the HW ring options are filled with 0 for any half of a
568 * pair that is not configured.
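* For example (illustrative only): with 4 RX queues and 2 TX queues,
* nb_qpairs is 4 and the TX side of pairs 2 and 3 is reported with
* ring_len 0 and dma_ring_addr 0.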
570 for (i = 0; i < nb_qpairs; i++) {
572 queue_cfg->txq.vsi_id = queue_info->vsi_id;
573 queue_cfg->txq.queue_id = i;
575 queue_cfg->txq.ring_len = txq[i]->nb_tx_desc;
576 queue_cfg->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
578 queue_cfg->txq.ring_len = 0;
579 queue_cfg->txq.dma_ring_addr = 0;
583 queue_cfg->rxq.vsi_id = queue_info->vsi_id;
584 queue_cfg->rxq.queue_id = i;
585 queue_cfg->rxq.max_pkt_size = vf->max_pkt_len;
587 mbp_priv = rte_mempool_get_priv(rxq[i]->mp);
588 queue_cfg->rxq.databuffer_size = mbp_priv->mbuf_data_room_size -
589 RTE_PKTMBUF_HEADROOM;
590 queue_cfg->rxq.ring_len = rxq[i]->nb_rx_desc;
591 queue_cfg->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
593 queue_cfg->rxq.ring_len = 0;
594 queue_cfg->rxq.dma_ring_addr = 0;
595 queue_cfg->rxq.databuffer_size = 0;
600 args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
601 args.in_args = (u8 *)queue_info;
602 args.in_args_size = len;
603 args.out_buffer = cmd_result_buffer;
604 args.out_size = I40E_AQ_BUF_SZ;
605 err = i40evf_execute_vf_cmd(dev, &args);
607 PMD_DRV_LOG(ERR, "fail to execute command "
608 "OP_CONFIG_VSI_QUEUES");
609 rte_free(queue_info);
615 i40evf_config_irq_map(struct rte_eth_dev *dev)
617 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
618 struct vf_cmd_info args;
619 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
620 sizeof(struct i40e_virtchnl_vector_map)];
621 struct i40e_virtchnl_irq_map_info *map_info;
623 map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
624 map_info->num_vectors = 1;
625 map_info->vecmap[0].rxitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
626 map_info->vecmap[0].txitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
627 map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
628 /* Always use the default dynamic MSI-X interrupt */
629 map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
630 /* Don't map any tx queue */
631 map_info->vecmap[0].txq_map = 0;
632 map_info->vecmap[0].rxq_map = 0;
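/*
 * rxq_map is a bitmap: bit i selects RX queue i for this vector, so the
 * loop below maps every configured RX queue to the single default vector
 * (e.g. 3 RX queues give a map of 0x7).
 */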
633 for (i = 0; i < dev->data->nb_rx_queues; i++)
634 map_info->vecmap[0].rxq_map |= 1 << i;
636 args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
637 args.in_args = (u8 *)cmd_buffer;
638 args.in_args_size = sizeof(cmd_buffer);
639 args.out_buffer = cmd_result_buffer;
640 args.out_size = I40E_AQ_BUF_SZ;
641 err = i40evf_execute_vf_cmd(dev, &args);
643 PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
649 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
652 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
653 struct i40e_virtchnl_queue_select queue_select;
655 struct vf_cmd_info args;
656 memset(&queue_select, 0, sizeof(queue_select));
657 queue_select.vsi_id = vf->vsi_res->vsi_id;
660 queue_select.rx_queues |= 1 << qid;
662 queue_select.tx_queues |= 1 << qid;
665 args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
667 args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
668 args.in_args = (u8 *)&queue_select;
669 args.in_args_size = sizeof(queue_select);
670 args.out_buffer = cmd_result_buffer;
671 args.out_size = I40E_AQ_BUF_SZ;
672 err = i40evf_execute_vf_cmd(dev, &args);
674 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
675 isrx ? "RX" : "TX", qid, on ? "on" : "off");
681 i40evf_start_queues(struct rte_eth_dev *dev)
683 struct rte_eth_dev_data *dev_data = dev->data;
685 struct i40e_rx_queue *rxq;
686 struct i40e_tx_queue *txq;
688 for (i = 0; i < dev->data->nb_rx_queues; i++) {
689 rxq = dev_data->rx_queues[i];
690 if (rxq->rx_deferred_start)
692 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
693 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
698 for (i = 0; i < dev->data->nb_tx_queues; i++) {
699 txq = dev_data->tx_queues[i];
700 if (txq->tx_deferred_start)
702 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
703 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
712 i40evf_stop_queues(struct rte_eth_dev *dev)
716 /* Stop TX queues first */
717 for (i = 0; i < dev->data->nb_tx_queues; i++) {
718 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
719 PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
724 /* Then stop RX queues */
725 for (i = 0; i < dev->data->nb_rx_queues; i++) {
726 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
727 PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
736 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
738 struct i40e_virtchnl_ether_addr_list *list;
739 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
740 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
741 sizeof(struct i40e_virtchnl_ether_addr)];
743 struct vf_cmd_info args;
745 if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
746 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
747 addr->addr_bytes[0], addr->addr_bytes[1],
748 addr->addr_bytes[2], addr->addr_bytes[3],
749 addr->addr_bytes[4], addr->addr_bytes[5]);
753 list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
754 list->vsi_id = vf->vsi_res->vsi_id;
755 list->num_elements = 1;
756 (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
757 sizeof(addr->addr_bytes));
759 args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
760 args.in_args = cmd_buffer;
761 args.in_args_size = sizeof(cmd_buffer);
762 args.out_buffer = cmd_result_buffer;
763 args.out_size = I40E_AQ_BUF_SZ;
764 err = i40evf_execute_vf_cmd(dev, &args);
766 PMD_DRV_LOG(ERR, "fail to execute command "
767 "OP_ADD_ETHER_ADDRESS");
773 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
775 struct i40e_virtchnl_ether_addr_list *list;
776 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
777 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
778 sizeof(struct i40e_virtchnl_ether_addr)];
780 struct vf_cmd_info args;
782 if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
783 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
784 addr->addr_bytes[0], addr->addr_bytes[1],
785 addr->addr_bytes[2], addr->addr_bytes[3],
786 addr->addr_bytes[4], addr->addr_bytes[5]);
790 list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
791 list->vsi_id = vf->vsi_res->vsi_id;
792 list->num_elements = 1;
793 (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
794 sizeof(addr->addr_bytes));
796 args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
797 args.in_args = cmd_buffer;
798 args.in_args_size = sizeof(cmd_buffer);
799 args.out_buffer = cmd_result_buffer;
800 args.out_size = I40E_AQ_BUF_SZ;
801 err = i40evf_execute_vf_cmd(dev, &args);
803 PMD_DRV_LOG(ERR, "fail to execute command "
804 "OP_DEL_ETHER_ADDRESS");
810 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
812 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
813 struct i40e_virtchnl_queue_select q_stats;
814 struct i40e_eth_stats *pstats;
816 struct vf_cmd_info args;
818 memset(&q_stats, 0, sizeof(q_stats));
819 q_stats.vsi_id = vf->vsi_res->vsi_id;
820 args.ops = I40E_VIRTCHNL_OP_GET_STATS;
821 args.in_args = (u8 *)&q_stats;
822 args.in_args_size = sizeof(q_stats);
823 args.out_buffer = cmd_result_buffer;
824 args.out_size = I40E_AQ_BUF_SZ;
826 err = i40evf_execute_vf_cmd(dev, &args);
828 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
831 pstats = (struct i40e_eth_stats *)args.out_buffer;
832 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
833 pstats->rx_broadcast;
834 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
836 stats->ierrors = pstats->rx_discards;
837 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
838 stats->ibytes = pstats->rx_bytes;
839 stats->obytes = pstats->tx_bytes;
845 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
847 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
848 struct i40e_virtchnl_vlan_filter_list *vlan_list;
849 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
852 struct vf_cmd_info args;
854 vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
855 vlan_list->vsi_id = vf->vsi_res->vsi_id;
856 vlan_list->num_elements = 1;
857 vlan_list->vlan_id[0] = vlanid;
859 args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
860 args.in_args = (u8 *)&cmd_buffer;
861 args.in_args_size = sizeof(cmd_buffer);
862 args.out_buffer = cmd_result_buffer;
863 args.out_size = I40E_AQ_BUF_SZ;
864 err = i40evf_execute_vf_cmd(dev, &args);
866 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
872 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
874 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
875 struct i40e_virtchnl_vlan_filter_list *vlan_list;
876 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
879 struct vf_cmd_info args;
881 vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
882 vlan_list->vsi_id = vf->vsi_res->vsi_id;
883 vlan_list->num_elements = 1;
884 vlan_list->vlan_id[0] = vlanid;
886 args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
887 args.in_args = (u8 *)&cmd_buffer;
888 args.in_args_size = sizeof(cmd_buffer);
889 args.out_buffer = cmd_result_buffer;
890 args.out_size = I40E_AQ_BUF_SZ;
891 err = i40evf_execute_vf_cmd(dev, &args);
893 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
899 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
902 struct vf_cmd_info args;
903 struct rte_eth_link *new_link;
905 args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
907 args.in_args_size = 0;
908 args.out_buffer = cmd_result_buffer;
909 args.out_size = I40E_AQ_BUF_SZ;
910 err = i40evf_execute_vf_cmd(dev, &args);
912 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
916 new_link = (struct rte_eth_link *)args.out_buffer;
917 (void)rte_memcpy(link, new_link, sizeof(*link));
922 static struct rte_pci_id pci_id_i40evf_map[] = {
923 #define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
924 #include "rte_pci_dev_ids.h"
925 { .vendor_id = 0, /* sentinel */ },
929 i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
930 struct rte_eth_link *link)
932 struct rte_eth_link *dst = &(dev->data->dev_link);
933 struct rte_eth_link *src = link;
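/*
 * This relies on struct rte_eth_link fitting into 64 bits, so the whole
 * link status can be replaced with a single rte_atomic64_cmpset() and
 * readers never observe a partially updated value.
 */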
935 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
936 *(uint64_t *)src) == 0)
943 i40evf_reset_vf(struct i40e_hw *hw)
947 if (i40e_vf_reset(hw) != I40E_SUCCESS) {
948 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
952 * After the VF issues a reset command to the PF, the PF won't
953 * necessarily reset the VF; that depends on what state the PF is in.
954 * If the PF is not initialized yet, it skips the VF reset. Otherwise
955 * it tries to reset the VF. Even when the VF is reset, the PF sets
956 * I40E_VFGEN_RSTAT to COMPLETE first, then waits 10 ms and sets it to
957 * ACTIVE. Within that window the VF may miss the moment COMPLETE is
958 * set, so the VF waits for a comparatively long time here.
962 for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
963 reset = rd32(hw, I40E_VFGEN_RSTAT) &
964 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
965 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
966 if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
972 if (i >= MAX_RESET_WAIT_CNT) {
973 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
981 i40evf_init_vf(struct rte_eth_dev *dev)
984 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
985 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
987 vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
988 vf->dev_data = dev->data;
989 err = i40evf_set_mac_type(hw);
991 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
995 i40e_init_adminq_parameter(hw);
996 err = i40e_init_adminq(hw);
998 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1003 /* Reset VF and wait until it's complete */
1004 if (i40evf_reset_vf(hw)) {
1005 PMD_INIT_LOG(ERR, "reset NIC failed");
1009 /* VF has been reset; shut down the admin queue and initialize it again */
1010 if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1011 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1015 i40e_init_adminq_parameter(hw);
1016 if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1017 PMD_INIT_LOG(ERR, "init_adminq failed");
1020 if (i40evf_check_api_version(dev) != 0) {
1021 PMD_INIT_LOG(ERR, "check_api version failed");
1024 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1025 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1026 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1028 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1032 if (i40evf_get_vf_resource(dev) != 0) {
1033 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1037 /* got VF config message back from PF, now we can parse it */
1038 for (i = 0; i < vf->vf_res->num_vsis; i++) {
1039 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
1040 vf->vsi_res = &vf->vf_res->vsi_res[i];
1044 PMD_INIT_LOG(ERR, "no LAN VSI found");
1048 vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1049 vf->vsi.type = vf->vsi_res->vsi_type;
1050 vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1052 /* check the MAC address; if it's not valid, generate a random one */
1053 if (I40E_SUCCESS != i40e_validate_mac_addr(\
1054 vf->vsi_res->default_mac_addr))
1055 eth_random_addr(vf->vsi_res->default_mac_addr);
1057 ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
1058 (struct ether_addr *)hw->mac.addr);
1063 rte_free(vf->vf_res);
1065 i40e_shutdown_adminq(hw); /* ignore error */
1071 i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
1072 struct rte_eth_dev *eth_dev)
1074 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
1075 eth_dev->data->dev_private);
1077 PMD_INIT_FUNC_TRACE();
1079 /* assign ops func pointer */
1080 eth_dev->dev_ops = &i40evf_eth_dev_ops;
1081 eth_dev->rx_pkt_burst = &i40e_recv_pkts;
1082 eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
1085 * For secondary processes, we don't initialise any further as primary
1086 * has already done this work.
1088 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1089 if (eth_dev->data->scattered_rx)
1090 eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
1094 hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
1095 hw->device_id = eth_dev->pci_dev->id.device_id;
1096 hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
1097 hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
1098 hw->bus.device = eth_dev->pci_dev->addr.devid;
1099 hw->bus.func = eth_dev->pci_dev->addr.function;
1100 hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
1102 if (i40evf_init_vf(eth_dev) != 0) {
1103 PMD_INIT_LOG(ERR, "Init vf failed");
1108 eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
1110 if (eth_dev->data->mac_addrs == NULL) {
1111 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
1112 "store MAC addresses", ETHER_ADDR_LEN);
1115 ether_addr_copy((struct ether_addr *)hw->mac.addr,
1116 (struct ether_addr *)eth_dev->data->mac_addrs);
1122 * virtual function driver struct
1124 static struct eth_driver rte_i40evf_pmd = {
1126 .name = "rte_i40evf_pmd",
1127 .id_table = pci_id_i40evf_map,
1128 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1130 .eth_dev_init = i40evf_dev_init,
1131 .dev_private_size = sizeof(struct i40e_vf),
1135 * VF Driver initialization routine.
1136 * Invoked once at EAL init time.
1137 * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
1140 rte_i40evf_pmd_init(const char *name __rte_unused,
1141 const char *params __rte_unused)
1143 PMD_INIT_FUNC_TRACE();
1145 rte_eth_driver_register(&rte_i40evf_pmd);
1150 static struct rte_driver rte_i40evf_driver = {
1152 .init = rte_i40evf_pmd_init,
1155 PMD_REGISTER_DRIVER(rte_i40evf_driver);
1158 i40evf_dev_configure(struct rte_eth_dev *dev)
1160 return i40evf_init_vlan(dev);
1164 i40evf_init_vlan(struct rte_eth_dev *dev)
1166 struct rte_eth_dev_data *data = dev->data;
1169 /* Apply vlan offload setting */
1170 i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1172 /* Apply pvid setting */
1173 ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1174 data->dev_conf.txmode.hw_vlan_insert_pvid);
1179 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1181 bool enable_vlan_strip = 0;
1182 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1183 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1185 /* Linux pf host doesn't support vlan offload yet */
1186 if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1187 /* Vlan stripping setting */
1188 if (mask & ETH_VLAN_STRIP_MASK) {
1189 /* Enable or disable VLAN stripping */
1190 if (dev_conf->rxmode.hw_vlan_strip)
1191 enable_vlan_strip = 1;
1193 enable_vlan_strip = 0;
1195 i40evf_config_vlan_offload(dev, enable_vlan_strip);
1201 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1203 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1204 struct i40e_vsi_vlan_pvid_info info;
1205 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1207 memset(&info, 0, sizeof(info));
1210 /* The Linux PF host doesn't support VLAN offload yet */
1211 if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1213 info.config.pvid = pvid;
1215 info.config.reject.tagged =
1216 dev_conf->txmode.hw_vlan_reject_tagged;
1217 info.config.reject.untagged =
1218 dev_conf->txmode.hw_vlan_reject_untagged;
1220 return i40evf_config_vlan_pvid(dev, &info);
1227 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1229 struct i40e_rx_queue *rxq;
1231 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1233 PMD_INIT_FUNC_TRACE();
1235 if (rx_queue_id < dev->data->nb_rx_queues) {
1236 rxq = dev->data->rx_queues[rx_queue_id];
1238 err = i40e_alloc_rx_queue_mbufs(rxq);
1240 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1246 /* Init the RX tail register. */
1247 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1248 I40EVF_WRITE_FLUSH(hw);
1250 /* Ready to switch the queue on */
1251 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1254 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1262 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1264 struct i40e_rx_queue *rxq;
1267 if (rx_queue_id < dev->data->nb_rx_queues) {
1268 rxq = dev->data->rx_queues[rx_queue_id];
1270 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1273 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1278 i40e_rx_queue_release_mbufs(rxq);
1279 i40e_reset_rx_queue(rxq);
1286 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1290 PMD_INIT_FUNC_TRACE();
1292 if (tx_queue_id < dev->data->nb_tx_queues) {
1294 /* Ready to switch the queue on */
1295 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1298 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1306 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1308 struct i40e_tx_queue *txq;
1311 if (tx_queue_id < dev->data->nb_tx_queues) {
1312 txq = dev->data->tx_queues[tx_queue_id];
1314 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1317 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
1322 i40e_tx_queue_release_mbufs(txq);
1323 i40e_reset_tx_queue(txq);
1330 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1335 ret = i40evf_add_vlan(dev, vlan_id);
1337 ret = i40evf_del_vlan(dev, vlan_id);
1343 i40evf_rx_init(struct rte_eth_dev *dev)
1345 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1347 struct i40e_rx_queue **rxq =
1348 (struct i40e_rx_queue **)dev->data->rx_queues;
1349 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1351 i40evf_config_rss(vf);
1352 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1353 rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
1354 I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
1357 /* Flush the operation to write registers */
1358 I40EVF_WRITE_FLUSH(hw);
1364 i40evf_tx_init(struct rte_eth_dev *dev)
1367 struct i40e_tx_queue **txq =
1368 (struct i40e_tx_queue **)dev->data->tx_queues;
1369 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1371 for (i = 0; i < dev->data->nb_tx_queues; i++)
1372 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1376 i40evf_enable_queues_intr(struct i40e_hw *hw)
1378 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1379 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1380 I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1384 i40evf_disable_queues_intr(struct i40e_hw *hw)
1386 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1391 i40evf_dev_start(struct rte_eth_dev *dev)
1393 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1394 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1395 struct ether_addr mac_addr;
1397 PMD_INIT_FUNC_TRACE();
1399 vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1400 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1401 if (vf->max_pkt_len <= ETHER_MAX_LEN ||
1402 vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
1403 PMD_DRV_LOG(ERR, "maximum packet length must "
1404 "be larger than %u and smaller than %u,"
1405 "as jumbo frame is enabled",
1406 (uint32_t)ETHER_MAX_LEN,
1407 (uint32_t)I40E_FRAME_SIZE_MAX);
1408 return I40E_ERR_CONFIG;
1411 if (vf->max_pkt_len < ETHER_MIN_LEN ||
1412 vf->max_pkt_len > ETHER_MAX_LEN) {
1413 PMD_DRV_LOG(ERR, "maximum packet length must be "
1414 "larger than %u and smaller than %u, "
1415 "as jumbo frame is disabled",
1416 (uint32_t)ETHER_MIN_LEN,
1417 (uint32_t)ETHER_MAX_LEN);
1418 return I40E_ERR_CONFIG;
1422 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
1423 dev->data->nb_tx_queues);
1425 if (i40evf_rx_init(dev) != 0) {
1426 PMD_DRV_LOG(ERR, "failed to do RX init");
1430 i40evf_tx_init(dev);
1432 if (i40evf_configure_queues(dev) != 0) {
1433 PMD_DRV_LOG(ERR, "configure queues failed");
1436 if (i40evf_config_irq_map(dev)) {
1437 PMD_DRV_LOG(ERR, "config_irq_map failed");
1442 (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
1443 sizeof(mac_addr.addr_bytes));
1444 if (i40evf_add_mac_addr(dev, &mac_addr)) {
1445 PMD_DRV_LOG(ERR, "Failed to add mac addr");
1449 if (i40evf_start_queues(dev) != 0) {
1450 PMD_DRV_LOG(ERR, "enable queues failed");
1454 i40evf_enable_queues_intr(hw);
1458 i40evf_del_mac_addr(dev, &mac_addr);
1464 i40evf_dev_stop(struct rte_eth_dev *dev)
1466 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1468 PMD_INIT_FUNC_TRACE();
1470 i40evf_disable_queues_intr(hw);
1471 i40evf_stop_queues(dev);
1475 i40evf_dev_link_update(struct rte_eth_dev *dev,
1476 __rte_unused int wait_to_complete)
1478 struct rte_eth_link new_link;
1479 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1481 * The DPDK PF host provides an interface to acquire the link status,
1482 * while the Linux driver does not
1484 if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1485 i40evf_get_link_status(dev, &new_link);
1487 /* Always assume it's up, for Linux driver PF host */
1488 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1489 new_link.link_speed = ETH_LINK_SPEED_10000;
1490 new_link.link_status = 1;
1492 i40evf_dev_atomic_write_link_status(dev, &new_link);
1498 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1500 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1503 /* If enabled, just return */
1504 if (vf->promisc_unicast_enabled)
1507 ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1509 vf->promisc_unicast_enabled = TRUE;
1513 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1515 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1518 /* If disabled, just return */
1519 if (!vf->promisc_unicast_enabled)
1522 ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1524 vf->promisc_unicast_enabled = FALSE;
1528 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1530 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1533 /* If enabled, just return */
1534 if (vf->promisc_multicast_enabled)
1537 ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1539 vf->promisc_multicast_enabled = TRUE;
1543 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1545 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1548 /* If already disabled, just return */
1549 if (!vf->promisc_multicast_enabled)
1552 ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
1554 vf->promisc_multicast_enabled = FALSE;
1558 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1560 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1562 memset(dev_info, 0, sizeof(*dev_info));
1563 dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
1564 dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
1565 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1566 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1570 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1572 memset(stats, 0, sizeof(*stats));
1573 if (i40evf_get_statics(dev, stats))
1574 PMD_DRV_LOG(ERR, "Get statics failed");
1578 i40evf_dev_close(struct rte_eth_dev *dev)
1580 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1582 i40evf_dev_stop(dev);
1583 i40evf_reset_vf(hw);
1584 i40e_shutdown_adminq(hw);
1588 i40evf_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
1591 uint8_t hash_key_len;
1592 uint64_t rss_hf, hena;
1594 hash_key = (uint32_t *)(rss_conf->rss_key);
1595 hash_key_len = rss_conf->rss_key_len;
1596 if (hash_key != NULL && hash_key_len >=
1597 (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
1600 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1601 I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
1604 rss_hf = rss_conf->rss_hf;
1605 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1606 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1607 hena &= ~I40E_RSS_HENA_ALL;
1608 hena |= i40e_config_hena(rss_hf);
1609 I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
1610 I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
1611 I40EVF_WRITE_FLUSH(hw);
1617 i40evf_disable_rss(struct i40e_vf *vf)
1619 struct i40e_hw *hw = I40E_VF_TO_HW(vf);
1622 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1623 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1624 hena &= ~I40E_RSS_HENA_ALL;
1625 I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
1626 I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
1627 I40EVF_WRITE_FLUSH(hw);
1631 i40evf_config_rss(struct i40e_vf *vf)
1633 struct i40e_hw *hw = I40E_VF_TO_HW(vf);
1634 struct rte_eth_rss_conf rss_conf;
1635 uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
1637 if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
1638 i40evf_disable_rss(vf);
1639 PMD_DRV_LOG(DEBUG, "RSS not configured\n");
1643 /* Fill out the look up table */
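/*
 * Each 32-bit VFQF_HLUT register packs four 8-bit queue indices: 'lut'
 * accumulates entries with (lut << 8) | j and a register is written once
 * every fourth iteration, with i >> 2 selecting the register.
 */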
1644 for (i = 0, j = 0; i < nb_q; i++, j++) {
1645 if (j >= vf->num_queue_pairs)
1647 lut = (lut << 8) | j;
1649 I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
1652 rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
1653 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
1654 i40evf_disable_rss(vf);
1655 PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
1659 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < nb_q) {
1660 /* Calculate the default hash key */
1661 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1662 rss_key_default[i] = (uint32_t)rte_rand();
1663 rss_conf.rss_key = (uint8_t *)rss_key_default;
1664 rss_conf.rss_key_len = nb_q;
1667 return i40evf_hw_rss_hash_set(hw, &rss_conf);
1671 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
1672 struct rte_eth_rss_conf *rss_conf)
1674 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1675 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
1678 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1679 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1680 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
1681 if (rss_hf != 0) /* Enable RSS */
1687 if (rss_hf == 0) /* Disable RSS */
1690 return i40evf_hw_rss_hash_set(hw, rss_conf);
1694 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1695 struct rte_eth_rss_conf *rss_conf)
1697 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1698 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
1703 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1704 hash_key[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
1705 rss_conf->rss_key_len = i * sizeof(uint32_t);
1707 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1708 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1709 rss_conf->rss_hf = i40e_parse_hena(hena);