4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
46 #include <rte_interrupts.h>
48 #include <rte_debug.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
62 #include "i40e_logs.h"
63 #include "i40e/i40e_prototype.h"
64 #include "i40e/i40e_adminq_cmd.h"
65 #include "i40e/i40e_type.h"
67 #include "i40e_rxtx.h"
68 #include "i40e_ethdev.h"
70 #define I40EVF_VSI_DEFAULT_MSIX_INTR 1
72 /* busy wait delay in msec */
73 #define I40EVF_BUSY_WAIT_DELAY 10
74 #define I40EVF_BUSY_WAIT_COUNT 50
75 #define MAX_RESET_WAIT_CNT 20
77 struct i40evf_arq_msg_info {
78 enum i40e_virtchnl_ops ops;
79 enum i40e_status_code result;
86 enum i40e_virtchnl_ops ops;
88 uint32_t in_args_size;
90 /* Used for both input and output: pass in the buffer size and
91 * get back the actual returned result
96 enum i40evf_aq_result {
97 I40EVF_MSG_ERR = -1, /* Error occurred when accessing admin queue */
98 I40EVF_MSG_NON, /* Read nothing from admin queue */
99 I40EVF_MSG_SYS, /* Read system msg from admin queue */
100 I40EVF_MSG_CMD, /* Read async command result */
103 /* A shared buffer to store the command result from the PF driver */
104 static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
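/*
 * Note: VF commands are serialized through vf->pend_cmd (see
 * _atomic_set_cmd() below), so only one command is outstanding at a
 * time and a single shared result buffer suffices.
 */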
106 static int i40evf_dev_configure(struct rte_eth_dev *dev);
107 static int i40evf_dev_start(struct rte_eth_dev *dev);
108 static void i40evf_dev_stop(struct rte_eth_dev *dev);
109 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
110 struct rte_eth_dev_info *dev_info);
111 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
112 __rte_unused int wait_to_complete);
113 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
114 struct rte_eth_stats *stats);
115 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
116 uint16_t vlan_id, int on);
117 static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
118 static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
120 static void i40evf_dev_close(struct rte_eth_dev *dev);
121 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
122 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
123 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
124 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
125 static int i40evf_get_link_status(struct rte_eth_dev *dev,
126 struct rte_eth_link *link);
127 static int i40evf_init_vlan(struct rte_eth_dev *dev);
128 static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
129 uint16_t rx_queue_id);
130 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
131 uint16_t rx_queue_id);
132 static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
133 uint16_t tx_queue_id);
134 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
135 uint16_t tx_queue_id);
136 static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
137 struct rte_eth_rss_reta_entry64 *reta_conf,
139 static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
140 struct rte_eth_rss_reta_entry64 *reta_conf,
142 static int i40evf_config_rss(struct i40e_vf *vf);
143 static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
144 struct rte_eth_rss_conf *rss_conf);
145 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
146 struct rte_eth_rss_conf *rss_conf);
148 /* Default hash key buffer for RSS */
149 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
151 static const struct eth_dev_ops i40evf_eth_dev_ops = {
152 .dev_configure = i40evf_dev_configure,
153 .dev_start = i40evf_dev_start,
154 .dev_stop = i40evf_dev_stop,
155 .promiscuous_enable = i40evf_dev_promiscuous_enable,
156 .promiscuous_disable = i40evf_dev_promiscuous_disable,
157 .allmulticast_enable = i40evf_dev_allmulticast_enable,
158 .allmulticast_disable = i40evf_dev_allmulticast_disable,
159 .link_update = i40evf_dev_link_update,
160 .stats_get = i40evf_dev_stats_get,
161 .dev_close = i40evf_dev_close,
162 .dev_infos_get = i40evf_dev_info_get,
163 .vlan_filter_set = i40evf_vlan_filter_set,
164 .vlan_offload_set = i40evf_vlan_offload_set,
165 .vlan_pvid_set = i40evf_vlan_pvid_set,
166 .rx_queue_start = i40evf_dev_rx_queue_start,
167 .rx_queue_stop = i40evf_dev_rx_queue_stop,
168 .tx_queue_start = i40evf_dev_tx_queue_start,
169 .tx_queue_stop = i40evf_dev_tx_queue_stop,
170 .rx_queue_setup = i40e_dev_rx_queue_setup,
171 .rx_queue_release = i40e_dev_rx_queue_release,
172 .tx_queue_setup = i40e_dev_tx_queue_setup,
173 .tx_queue_release = i40e_dev_tx_queue_release,
174 .reta_update = i40evf_dev_rss_reta_update,
175 .reta_query = i40evf_dev_rss_reta_query,
176 .rss_hash_update = i40evf_dev_rss_hash_update,
177 .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
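/*
 * This ops table wires the generic ethdev API to the VF-specific
 * handlers above; RX/TX queue setup and release are shared with the
 * PF driver (i40e_dev_rx_queue_setup() and friends).
 */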
181 i40evf_set_mac_type(struct i40e_hw *hw)
183 int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
185 if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
186 switch (hw->device_id) {
188 case I40E_DEV_ID_VF_HV:
189 hw->mac.type = I40E_MAC_VF;
190 status = I40E_SUCCESS;
201 * Parse admin queue message.
206 * > 0: read cmd result
208 static enum i40evf_aq_result
209 i40evf_parse_pfmsg(struct i40e_vf *vf,
210 struct i40e_arq_event_info *event,
211 struct i40evf_arq_msg_info *data)
213 enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
214 rte_le_to_cpu_32(event->desc.cookie_high);
215 enum i40e_status_code retval = (enum i40e_status_code)\
216 rte_le_to_cpu_32(event->desc.cookie_low);
217 enum i40evf_aq_result ret = I40EVF_MSG_CMD;
220 if (opcode == I40E_VIRTCHNL_OP_EVENT) {
221 struct i40e_virtchnl_pf_event *vpe =
222 (struct i40e_virtchnl_pf_event *)event->msg_buf;
224 /* Initialize ret to sys event */
225 ret = I40EVF_MSG_SYS;
226 switch (vpe->event) {
227 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
229 vpe->event_data.link_event.link_status;
230 vf->pend_msg |= PFMSG_LINK_CHANGE;
231 PMD_DRV_LOG(INFO, "Link status update:%s",
232 vf->link_up ? "up" : "down");
234 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
236 vf->pend_msg |= PFMSG_RESET_IMPENDING;
237 PMD_DRV_LOG(INFO, "VF is resetting");
239 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
240 vf->dev_closed = true;
241 vf->pend_msg |= PFMSG_DRIVER_CLOSE;
242 PMD_DRV_LOG(INFO, "PF driver closed");
245 PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
246 __func__, vpe->event);
249 /* asynchronous reply to a command previously issued by the VF */
250 ret = I40EVF_MSG_CMD;
251 /* Actual data length read from PF */
252 data->msg_len = event->msg_len;
254 /* fill the ops and result to notify VF */
255 data->result = retval;
262 * Read data from the admin queue to get a msg from the PF driver
264 static enum i40evf_aq_result
265 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
267 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
268 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
269 struct i40e_arq_event_info event;
271 enum i40evf_aq_result result = I40EVF_MSG_NON;
273 event.buf_len = data->buf_len;
274 event.msg_buf = data->msg;
275 ret = i40e_clean_arq_element(hw, &event, NULL);
276 /* Can't read any msg from adminQ */
278 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
279 result = I40EVF_MSG_NON;
281 result = I40EVF_MSG_ERR;
285 /* Parse the event */
286 result = i40evf_parse_pfmsg(vf, &event, data);
292 * Poll and read until the command result is returned by the PF driver or an error occurs.
295 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
296 struct i40evf_arq_msg_info *data)
299 enum i40evf_aq_result ret;
301 #define MAX_TRY_TIMES 10
302 #define ASQ_DELAY_MS 50
304 /* Delay some time first */
305 rte_delay_ms(ASQ_DELAY_MS);
306 ret = i40evf_read_pfmsg(dev, data);
307 if (ret == I40EVF_MSG_CMD)
309 else if (ret == I40EVF_MSG_ERR)
312 /* If no msg was read, or a sys event was read, continue */
313 } while (i++ < MAX_TRY_TIMES);
319 * Clear the current command. Only call this after
320 * _atomic_set_cmd has completed successfully.
323 _clear_cmd(struct i40e_vf *vf)
326 vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
330 * Check whether a pending cmd is in execution. If none, set the new command.
333 _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
335 int ret = rte_atomic32_cmpset(&vf->pend_cmd,
336 I40E_VIRTCHNL_OP_UNKNOWN, ops);
339 PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
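/*
 * Send a virtchnl command to the PF through the admin queue and poll
 * for its reply: mark the command pending with _atomic_set_cmd(),
 * send it with i40e_aq_send_msg_to_pf(), then wait for the matching
 * reply with i40evf_wait_cmd_done() before returning the combined
 * status.
 */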
345 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
347 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
348 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
350 struct i40evf_arq_msg_info info;
352 if (_atomic_set_cmd(vf, args->ops))
355 info.msg = args->out_buffer;
356 info.buf_len = args->out_size;
357 info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
358 info.result = I40E_SUCCESS;
360 err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
361 args->in_args, args->in_args_size, NULL);
363 PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
367 err = i40evf_wait_cmd_done(dev, &info);
368 /* message was read and it is the expected one */
369 if (!err && args->ops == info.ops)
372 PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
373 else if (args->ops != info.ops)
374 PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
375 args->ops, info.ops);
377 return (err | info.result);
381 * Check the API version, waiting synchronously until the version is read from the admin queue or the read fails
384 i40evf_check_api_version(struct rte_eth_dev *dev)
386 struct i40e_virtchnl_version_info version, *pver;
388 struct vf_cmd_info args;
389 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
391 version.major = I40E_VIRTCHNL_VERSION_MAJOR;
392 version.minor = I40E_VIRTCHNL_VERSION_MINOR;
394 args.ops = I40E_VIRTCHNL_OP_VERSION;
395 args.in_args = (uint8_t *)&version;
396 args.in_args_size = sizeof(version);
397 args.out_buffer = cmd_result_buffer;
398 args.out_size = I40E_AQ_BUF_SZ;
400 err = i40evf_execute_vf_cmd(dev, &args);
402 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
406 pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
407 vf->version_major = pver->major;
408 vf->version_minor = pver->minor;
409 if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
410 PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
411 else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
412 (vf->version_minor == I40E_VIRTCHNL_VERSION_MINOR))
413 PMD_DRV_LOG(INFO, "Peer is Linux PF host");
415 PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
416 vf->version_major, vf->version_minor,
417 I40E_VIRTCHNL_VERSION_MAJOR,
418 I40E_VIRTCHNL_VERSION_MINOR);
426 i40evf_get_vf_resource(struct rte_eth_dev *dev)
428 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
429 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
431 struct vf_cmd_info args;
434 args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
436 args.in_args_size = 0;
437 args.out_buffer = cmd_result_buffer;
438 args.out_size = I40E_AQ_BUF_SZ;
440 err = i40evf_execute_vf_cmd(dev, &args);
443 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
447 len = sizeof(struct i40e_virtchnl_vf_resource) +
448 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
450 (void)rte_memcpy(vf->vf_res, args.out_buffer,
451 RTE_MIN(args.out_size, len));
452 i40e_vf_parse_hw_config(hw, vf->vf_res);
458 i40evf_config_promisc(struct rte_eth_dev *dev,
460 bool enable_multicast)
462 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
464 struct vf_cmd_info args;
465 struct i40e_virtchnl_promisc_info promisc;
468 promisc.vsi_id = vf->vsi_res->vsi_id;
471 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
473 if (enable_multicast)
474 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
476 args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
477 args.in_args = (uint8_t *)&promisc;
478 args.in_args_size = sizeof(promisc);
479 args.out_buffer = cmd_result_buffer;
480 args.out_size = I40E_AQ_BUF_SZ;
482 err = i40evf_execute_vf_cmd(dev, &args);
485 PMD_DRV_LOG(ERR, "fail to execute command "
486 "CONFIG_PROMISCUOUS_MODE");
490 /* Configure VLAN and double VLAN offload. Use a flag to specify which part to configure */
492 i40evf_config_vlan_offload(struct rte_eth_dev *dev,
493 bool enable_vlan_strip)
495 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
497 struct vf_cmd_info args;
498 struct i40e_virtchnl_vlan_offload_info offload;
500 offload.vsi_id = vf->vsi_res->vsi_id;
501 offload.enable_vlan_strip = enable_vlan_strip;
503 args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
504 args.in_args = (uint8_t *)&offload;
505 args.in_args_size = sizeof(offload);
506 args.out_buffer = cmd_result_buffer;
507 args.out_size = I40E_AQ_BUF_SZ;
509 err = i40evf_execute_vf_cmd(dev, &args);
511 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
517 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
518 struct i40e_vsi_vlan_pvid_info *info)
520 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
522 struct vf_cmd_info args;
523 struct i40e_virtchnl_pvid_info tpid_info;
525 if (dev == NULL || info == NULL) {
526 PMD_DRV_LOG(ERR, "invalid parameters");
527 return I40E_ERR_PARAM;
530 memset(&tpid_info, 0, sizeof(tpid_info));
531 tpid_info.vsi_id = vf->vsi_res->vsi_id;
532 (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
534 args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
535 args.in_args = (uint8_t *)&tpid_info;
536 args.in_args_size = sizeof(tpid_info);
537 args.out_buffer = cmd_result_buffer;
538 args.out_size = I40E_AQ_BUF_SZ;
540 err = i40evf_execute_vf_cmd(dev, &args);
542 PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
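/*
 * Helpers that fill one virtchnl TX/RX queue descriptor each. The
 * caller zeroes the whole config buffer beforehand, so queues beyond
 * the number actually configured keep a zero ring length.
 */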
548 i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
552 struct i40e_tx_queue *txq)
554 txq_info->vsi_id = vsi_id;
555 txq_info->queue_id = queue_id;
556 if (queue_id < nb_txq) {
557 txq_info->ring_len = txq->nb_tx_desc;
558 txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
563 i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
567 uint32_t max_pkt_size,
568 struct i40e_rx_queue *rxq)
570 rxq_info->vsi_id = vsi_id;
571 rxq_info->queue_id = queue_id;
572 rxq_info->max_pkt_size = max_pkt_size;
573 if (queue_id < nb_rxq) {
574 struct rte_pktmbuf_pool_private *mbp_priv;
576 rxq_info->ring_len = rxq->nb_rx_desc;
577 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
578 mbp_priv = rte_mempool_get_priv(rxq->mp);
579 rxq_info->databuffer_size =
580 mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
584 /* Configure VSI queues to work with a Linux PF host */
586 i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
588 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
589 struct i40e_rx_queue **rxq =
590 (struct i40e_rx_queue **)dev->data->rx_queues;
591 struct i40e_tx_queue **txq =
592 (struct i40e_tx_queue **)dev->data->tx_queues;
593 struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
594 struct i40e_virtchnl_queue_pair_info *vc_qpi;
595 struct vf_cmd_info args;
596 uint16_t i, nb_qp = vf->num_queue_pairs;
597 const uint32_t size =
598 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
602 memset(buff, 0, sizeof(buff));
603 vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
604 vc_vqci->vsi_id = vf->vsi_res->vsi_id;
605 vc_vqci->num_queue_pairs = nb_qp;
607 for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
608 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
609 vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
610 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
611 vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
612 vf->max_pkt_len, rxq[i]);
614 memset(&args, 0, sizeof(args));
615 args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
616 args.in_args = (uint8_t *)vc_vqci;
617 args.in_args_size = size;
618 args.out_buffer = cmd_result_buffer;
619 args.out_size = I40E_AQ_BUF_SZ;
620 ret = i40evf_execute_vf_cmd(dev, &args);
622 PMD_DRV_LOG(ERR, "Failed to execute command of "
623 "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
628 /* Configure VSI queues to work with a DPDK PF host */
630 i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
632 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
633 struct i40e_rx_queue **rxq =
634 (struct i40e_rx_queue **)dev->data->rx_queues;
635 struct i40e_tx_queue **txq =
636 (struct i40e_tx_queue **)dev->data->tx_queues;
637 struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
638 struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
639 struct vf_cmd_info args;
640 uint16_t i, nb_qp = vf->num_queue_pairs;
641 const uint32_t size =
642 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
646 memset(buff, 0, sizeof(buff));
647 vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
648 vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
649 vc_vqcei->num_queue_pairs = nb_qp;
650 vc_qpei = vc_vqcei->qpair;
651 for (i = 0; i < nb_qp; i++, vc_qpei++) {
652 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
653 vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
654 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
655 vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
656 vf->max_pkt_len, rxq[i]);
657 if (i < dev->data->nb_rx_queues)
659 * Add extra info for configuring VSI queues, which
660 * is needed to enable configurable CRC stripping
663 vc_qpei->rxq_ext.crcstrip =
664 dev->data->dev_conf.rxmode.hw_strip_crc;
666 memset(&args, 0, sizeof(args));
668 (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
669 args.in_args = (uint8_t *)vc_vqcei;
670 args.in_args_size = size;
671 args.out_buffer = cmd_result_buffer;
672 args.out_size = I40E_AQ_BUF_SZ;
673 ret = i40evf_execute_vf_cmd(dev, &args);
675 PMD_DRV_LOG(ERR, "Failed to execute command of "
676 "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
682 i40evf_configure_queues(struct rte_eth_dev *dev)
684 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
686 if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
687 /* To support DPDK PF host */
688 return i40evf_configure_vsi_queues_ext(dev);
690 /* To support Linux PF host */
691 return i40evf_configure_vsi_queues(dev);
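/*
 * Map every RX queue to the single default MSI-X vector
 * (I40EVF_VSI_DEFAULT_MSIX_INTR), with the RX/TX ITR indexes set to
 * RTE_LIBRTE_I40E_ITR_INTERVAL / 2; TX queues are left unmapped.
 */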
695 i40evf_config_irq_map(struct rte_eth_dev *dev)
697 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
698 struct vf_cmd_info args;
699 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
700 sizeof(struct i40e_virtchnl_vector_map)];
701 struct i40e_virtchnl_irq_map_info *map_info;
703 map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
704 map_info->num_vectors = 1;
705 map_info->vecmap[0].rxitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
706 map_info->vecmap[0].txitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
707 map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
708 /* Always use the default dynamic MSI-X interrupt */
709 map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
710 /* Don't map any tx queue */
711 map_info->vecmap[0].txq_map = 0;
712 map_info->vecmap[0].rxq_map = 0;
713 for (i = 0; i < dev->data->nb_rx_queues; i++)
714 map_info->vecmap[0].rxq_map |= 1 << i;
716 args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
717 args.in_args = (u8 *)cmd_buffer;
718 args.in_args_size = sizeof(cmd_buffer);
719 args.out_buffer = cmd_result_buffer;
720 args.out_size = I40E_AQ_BUF_SZ;
721 err = i40evf_execute_vf_cmd(dev, &args);
723 PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
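/*
 * Enable or disable a single RX or TX queue by sending
 * OP_ENABLE_QUEUES / OP_DISABLE_QUEUES with a one-bit queue bitmap
 * for the VF's VSI.
 */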
729 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
732 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
733 struct i40e_virtchnl_queue_select queue_select;
735 struct vf_cmd_info args;
736 memset(&queue_select, 0, sizeof(queue_select));
737 queue_select.vsi_id = vf->vsi_res->vsi_id;
740 queue_select.rx_queues |= 1 << qid;
742 queue_select.tx_queues |= 1 << qid;
745 args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
747 args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
748 args.in_args = (u8 *)&queue_select;
749 args.in_args_size = sizeof(queue_select);
750 args.out_buffer = cmd_result_buffer;
751 args.out_size = I40E_AQ_BUF_SZ;
752 err = i40evf_execute_vf_cmd(dev, &args);
754 PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
755 isrx ? "RX" : "TX", qid, on ? "on" : "off");
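/*
 * Start every RX queue and then every TX queue, skipping queues
 * marked as deferred-start; stopping runs in the opposite order
 * (TX first, then RX).
 */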
761 i40evf_start_queues(struct rte_eth_dev *dev)
763 struct rte_eth_dev_data *dev_data = dev->data;
765 struct i40e_rx_queue *rxq;
766 struct i40e_tx_queue *txq;
768 for (i = 0; i < dev->data->nb_rx_queues; i++) {
769 rxq = dev_data->rx_queues[i];
770 if (rxq->rx_deferred_start)
772 if (i40evf_dev_rx_queue_start(dev, i) != 0) {
773 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
778 for (i = 0; i < dev->data->nb_tx_queues; i++) {
779 txq = dev_data->tx_queues[i];
780 if (txq->tx_deferred_start)
782 if (i40evf_dev_tx_queue_start(dev, i) != 0) {
783 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
792 i40evf_stop_queues(struct rte_eth_dev *dev)
796 /* Stop TX queues first */
797 for (i = 0; i < dev->data->nb_tx_queues; i++) {
798 if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
799 PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
804 /* Then stop RX queues */
805 for (i = 0; i < dev->data->nb_rx_queues; i++) {
806 if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
807 PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
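/*
 * Add or remove a single MAC filter on the VF's VSI: build a
 * one-element i40e_virtchnl_ether_addr_list on the stack and send it
 * with OP_ADD_ETHER_ADDRESS / OP_DEL_ETHER_ADDRESS.
 */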
816 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
818 struct i40e_virtchnl_ether_addr_list *list;
819 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
820 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
821 sizeof(struct i40e_virtchnl_ether_addr)];
823 struct vf_cmd_info args;
825 if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
826 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
827 addr->addr_bytes[0], addr->addr_bytes[1],
828 addr->addr_bytes[2], addr->addr_bytes[3],
829 addr->addr_bytes[4], addr->addr_bytes[5]);
833 list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
834 list->vsi_id = vf->vsi_res->vsi_id;
835 list->num_elements = 1;
836 (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
837 sizeof(addr->addr_bytes));
839 args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
840 args.in_args = cmd_buffer;
841 args.in_args_size = sizeof(cmd_buffer);
842 args.out_buffer = cmd_result_buffer;
843 args.out_size = I40E_AQ_BUF_SZ;
844 err = i40evf_execute_vf_cmd(dev, &args);
846 PMD_DRV_LOG(ERR, "fail to execute command "
847 "OP_ADD_ETHER_ADDRESS");
853 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
855 struct i40e_virtchnl_ether_addr_list *list;
856 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
857 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
858 sizeof(struct i40e_virtchnl_ether_addr)];
860 struct vf_cmd_info args;
862 if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
863 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
864 addr->addr_bytes[0], addr->addr_bytes[1],
865 addr->addr_bytes[2], addr->addr_bytes[3],
866 addr->addr_bytes[4], addr->addr_bytes[5]);
870 list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
871 list->vsi_id = vf->vsi_res->vsi_id;
872 list->num_elements = 1;
873 (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
874 sizeof(addr->addr_bytes));
876 args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
877 args.in_args = cmd_buffer;
878 args.in_args_size = sizeof(cmd_buffer);
879 args.out_buffer = cmd_result_buffer;
880 args.out_size = I40E_AQ_BUF_SZ;
881 err = i40evf_execute_vf_cmd(dev, &args);
883 PMD_DRV_LOG(ERR, "fail to execute command "
884 "OP_DEL_ETHER_ADDRESS");
890 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
892 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
893 struct i40e_virtchnl_queue_select q_stats;
894 struct i40e_eth_stats *pstats;
896 struct vf_cmd_info args;
898 memset(&q_stats, 0, sizeof(q_stats));
899 q_stats.vsi_id = vf->vsi_res->vsi_id;
900 args.ops = I40E_VIRTCHNL_OP_GET_STATS;
901 args.in_args = (u8 *)&q_stats;
902 args.in_args_size = sizeof(q_stats);
903 args.out_buffer = cmd_result_buffer;
904 args.out_size = I40E_AQ_BUF_SZ;
906 err = i40evf_execute_vf_cmd(dev, &args);
908 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
911 pstats = (struct i40e_eth_stats *)args.out_buffer;
912 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
913 pstats->rx_broadcast;
914 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
916 stats->ierrors = pstats->rx_discards;
917 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
918 stats->ibytes = pstats->rx_bytes;
919 stats->obytes = pstats->tx_bytes;
925 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
927 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
928 struct i40e_virtchnl_vlan_filter_list *vlan_list;
929 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
932 struct vf_cmd_info args;
934 vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
935 vlan_list->vsi_id = vf->vsi_res->vsi_id;
936 vlan_list->num_elements = 1;
937 vlan_list->vlan_id[0] = vlanid;
939 args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
940 args.in_args = (u8 *)&cmd_buffer;
941 args.in_args_size = sizeof(cmd_buffer);
942 args.out_buffer = cmd_result_buffer;
943 args.out_size = I40E_AQ_BUF_SZ;
944 err = i40evf_execute_vf_cmd(dev, &args);
946 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
952 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
954 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
955 struct i40e_virtchnl_vlan_filter_list *vlan_list;
956 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
959 struct vf_cmd_info args;
961 vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
962 vlan_list->vsi_id = vf->vsi_res->vsi_id;
963 vlan_list->num_elements = 1;
964 vlan_list->vlan_id[0] = vlanid;
966 args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
967 args.in_args = (u8 *)&cmd_buffer;
968 args.in_args_size = sizeof(cmd_buffer);
969 args.out_buffer = cmd_result_buffer;
970 args.out_size = I40E_AQ_BUF_SZ;
971 err = i40evf_execute_vf_cmd(dev, &args);
973 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
979 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
982 struct vf_cmd_info args;
983 struct rte_eth_link *new_link;
985 args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
987 args.in_args_size = 0;
988 args.out_buffer = cmd_result_buffer;
989 args.out_size = I40E_AQ_BUF_SZ;
990 err = i40evf_execute_vf_cmd(dev, &args);
992 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
996 new_link = (struct rte_eth_link *)args.out_buffer;
997 (void)rte_memcpy(link, new_link, sizeof(*link));
1002 static const struct rte_pci_id pci_id_i40evf_map[] = {
1003 #define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
1004 #include "rte_pci_dev_ids.h"
1005 { .vendor_id = 0, /* sentinel */ },
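/*
 * Write the link status atomically. struct rte_eth_link is assumed to
 * fit into 64 bits here, so a single rte_atomic64_cmpset() can swap
 * the whole structure.
 */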
1009 i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
1010 struct rte_eth_link *link)
1012 struct rte_eth_link *dst = &(dev->data->dev_link);
1013 struct rte_eth_link *src = link;
1015 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1016 *(uint64_t *)src) == 0)
1023 i40evf_reset_vf(struct i40e_hw *hw)
1027 if (i40e_vf_reset(hw) != I40E_SUCCESS) {
1028 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1032 * After the VF issues a reset command to the PF, the PF won't
1033 * necessarily reset the VF; that depends on what state the PF is
1034 * in. If the PF is not initialized yet, it skips the VF reset;
1035 * otherwise it tries to reset. Even when the VF is reset, the PF
1036 * first sets I40E_VFGEN_RSTAT to COMPLETE, then waits 10 ms and
1037 * sets it to ACTIVE. During that window the VF may miss the moment
1038 * COMPLETE is set, so the VF waits for a comparatively long time.
1042 for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
1043 reset = rd32(hw, I40E_VFGEN_RSTAT) &
1044 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1045 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
1046 if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
1052 if (i >= MAX_RESET_WAIT_CNT) {
1053 PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1061 i40evf_init_vf(struct rte_eth_dev *dev)
1064 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1065 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1067 vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1068 vf->dev_data = dev->data;
1069 err = i40evf_set_mac_type(hw);
1071 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1075 i40e_init_adminq_parameter(hw);
1076 err = i40e_init_adminq(hw);
1078 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1083 /* Reset VF and wait until it's complete */
1084 if (i40evf_reset_vf(hw)) {
1085 PMD_INIT_LOG(ERR, "reset NIC failed");
1089 /* The VF has been reset; shut down the admin queue and initialize it again */
1090 if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1091 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1095 i40e_init_adminq_parameter(hw);
1096 if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1097 PMD_INIT_LOG(ERR, "init_adminq failed");
1100 if (i40evf_check_api_version(dev) != 0) {
1101 PMD_INIT_LOG(ERR, "check_api version failed");
1104 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1105 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1106 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1108 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1112 if (i40evf_get_vf_resource(dev) != 0) {
1113 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1117 /* got VF config message back from PF, now we can parse it */
1118 for (i = 0; i < vf->vf_res->num_vsis; i++) {
1119 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
1120 vf->vsi_res = &vf->vf_res->vsi_res[i];
1124 PMD_INIT_LOG(ERR, "no LAN VSI found");
1128 vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1129 vf->vsi.type = vf->vsi_res->vsi_type;
1130 vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1132 /* Check the MAC addr; if it's not valid, generate one */
1133 if (I40E_SUCCESS != i40e_validate_mac_addr(\
1134 vf->vsi_res->default_mac_addr))
1135 eth_random_addr(vf->vsi_res->default_mac_addr);
1137 ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
1138 (struct ether_addr *)hw->mac.addr);
1143 rte_free(vf->vf_res);
1145 i40e_shutdown_adminq(hw); /* ignore error */
1151 i40evf_dev_init(struct rte_eth_dev *eth_dev)
1153 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
1154 eth_dev->data->dev_private);
1156 PMD_INIT_FUNC_TRACE();
1158 /* assign ops func pointer */
1159 eth_dev->dev_ops = &i40evf_eth_dev_ops;
1160 eth_dev->rx_pkt_burst = &i40e_recv_pkts;
1161 eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
1164 * For secondary processes, we don't initialise any further as primary
1165 * has already done this work.
1167 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1168 if (eth_dev->data->scattered_rx)
1169 eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
1173 hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
1174 hw->device_id = eth_dev->pci_dev->id.device_id;
1175 hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
1176 hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
1177 hw->bus.device = eth_dev->pci_dev->addr.devid;
1178 hw->bus.func = eth_dev->pci_dev->addr.function;
1179 hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
1181 if (i40evf_init_vf(eth_dev) != 0) {
1182 PMD_INIT_LOG(ERR, "Init vf failed");
1187 eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
1189 if (eth_dev->data->mac_addrs == NULL) {
1190 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
1191 "store MAC addresses", ETHER_ADDR_LEN);
1194 ether_addr_copy((struct ether_addr *)hw->mac.addr,
1195 (struct ether_addr *)eth_dev->data->mac_addrs);
1201 * virtual function driver struct
1203 static struct eth_driver rte_i40evf_pmd = {
1205 .name = "rte_i40evf_pmd",
1206 .id_table = pci_id_i40evf_map,
1207 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1209 .eth_dev_init = i40evf_dev_init,
1210 .dev_private_size = sizeof(struct i40e_vf),
1214 * VF Driver initialization routine.
1215 * Invoked once at EAL init time.
1216 * Registers itself as the [Virtual Poll Mode] Driver for PCI Fortville devices.
1219 rte_i40evf_pmd_init(const char *name __rte_unused,
1220 const char *params __rte_unused)
1222 PMD_INIT_FUNC_TRACE();
1224 rte_eth_driver_register(&rte_i40evf_pmd);
1229 static struct rte_driver rte_i40evf_driver = {
1231 .init = rte_i40evf_pmd_init,
1234 PMD_REGISTER_DRIVER(rte_i40evf_driver);
1237 i40evf_dev_configure(struct rte_eth_dev *dev)
1239 return i40evf_init_vlan(dev);
1243 i40evf_init_vlan(struct rte_eth_dev *dev)
1245 struct rte_eth_dev_data *data = dev->data;
1248 /* Apply vlan offload setting */
1249 i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1251 /* Apply pvid setting */
1252 ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1253 data->dev_conf.txmode.hw_vlan_insert_pvid);
1258 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1260 bool enable_vlan_strip = 0;
1261 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1262 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1264 /* Linux pf host doesn't support vlan offload yet */
1265 if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1266 /* Vlan stripping setting */
1267 if (mask & ETH_VLAN_STRIP_MASK) {
1268 /* Enable or disable VLAN stripping */
1269 if (dev_conf->rxmode.hw_vlan_strip)
1270 enable_vlan_strip = 1;
1272 enable_vlan_strip = 0;
1274 i40evf_config_vlan_offload(dev, enable_vlan_strip);
1280 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1282 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1283 struct i40e_vsi_vlan_pvid_info info;
1284 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1286 memset(&info, 0, sizeof(info));
1289 /* Linux PF host doesn't support vlan offload yet */
1290 if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1292 info.config.pvid = pvid;
1294 info.config.reject.tagged =
1295 dev_conf->txmode.hw_vlan_reject_tagged;
1296 info.config.reject.untagged =
1297 dev_conf->txmode.hw_vlan_reject_untagged;
1299 return i40evf_config_vlan_pvid(dev, &info);
1306 i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1308 struct i40e_rx_queue *rxq;
1310 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1312 PMD_INIT_FUNC_TRACE();
1314 if (rx_queue_id < dev->data->nb_rx_queues) {
1315 rxq = dev->data->rx_queues[rx_queue_id];
1317 err = i40e_alloc_rx_queue_mbufs(rxq);
1319 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1325 /* Init the RX tail register. */
1326 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1327 I40EVF_WRITE_FLUSH(hw);
1329 /* Ready to switch the queue on */
1330 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1333 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1341 i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1343 struct i40e_rx_queue *rxq;
1346 if (rx_queue_id < dev->data->nb_rx_queues) {
1347 rxq = dev->data->rx_queues[rx_queue_id];
1349 err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1352 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1357 i40e_rx_queue_release_mbufs(rxq);
1358 i40e_reset_rx_queue(rxq);
1365 i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1369 PMD_INIT_FUNC_TRACE();
1371 if (tx_queue_id < dev->data->nb_tx_queues) {
1373 /* Ready to switch the queue on */
1374 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1377 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1385 i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1387 struct i40e_tx_queue *txq;
1390 if (tx_queue_id < dev->data->nb_tx_queues) {
1391 txq = dev->data->tx_queues[tx_queue_id];
1393 err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1396 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1401 i40e_tx_queue_release_mbufs(txq);
1402 i40e_reset_tx_queue(txq);
1409 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1414 ret = i40evf_add_vlan(dev, vlan_id);
1416 ret = i40evf_del_vlan(dev, vlan_id);
1422 i40evf_rx_init(struct rte_eth_dev *dev)
1424 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1426 struct i40e_rx_queue **rxq =
1427 (struct i40e_rx_queue **)dev->data->rx_queues;
1428 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1430 i40evf_config_rss(vf);
1431 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1432 rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
1433 I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
1436 /* Flush the operation to write registers */
1437 I40EVF_WRITE_FLUSH(hw);
1443 i40evf_tx_init(struct rte_eth_dev *dev)
1446 struct i40e_tx_queue **txq =
1447 (struct i40e_tx_queue **)dev->data->tx_queues;
1448 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1450 for (i = 0; i < dev->data->nb_tx_queues; i++)
1451 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1455 i40evf_enable_queues_intr(struct i40e_hw *hw)
1457 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1458 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1459 I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1463 i40evf_disable_queues_intr(struct i40e_hw *hw)
1465 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1470 i40evf_dev_start(struct rte_eth_dev *dev)
1472 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1473 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1474 struct ether_addr mac_addr;
1476 PMD_INIT_FUNC_TRACE();
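/*
 * Start sequence: validate max_rx_pkt_len against the jumbo-frame
 * setting, set up RSS and the RX/TX queue tail registers, push the
 * queue and IRQ-map configuration to the PF over virtchnl, add the
 * default MAC address, then switch the queues on and enable the queue
 * interrupt.
 */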
1478 vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1479 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1480 if (vf->max_pkt_len <= ETHER_MAX_LEN ||
1481 vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
1482 PMD_DRV_LOG(ERR, "maximum packet length must "
1483 "be larger than %u and smaller than %u,"
1484 "as jumbo frame is enabled",
1485 (uint32_t)ETHER_MAX_LEN,
1486 (uint32_t)I40E_FRAME_SIZE_MAX);
1487 return I40E_ERR_CONFIG;
1490 if (vf->max_pkt_len < ETHER_MIN_LEN ||
1491 vf->max_pkt_len > ETHER_MAX_LEN) {
1492 PMD_DRV_LOG(ERR, "maximum packet length must be "
1493 "larger than %u and smaller than %u, "
1494 "as jumbo frame is disabled",
1495 (uint32_t)ETHER_MIN_LEN,
1496 (uint32_t)ETHER_MAX_LEN);
1497 return I40E_ERR_CONFIG;
1501 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
1502 dev->data->nb_tx_queues);
1504 if (i40evf_rx_init(dev) != 0) {
1505 PMD_DRV_LOG(ERR, "failed to do RX init");
1509 i40evf_tx_init(dev);
1511 if (i40evf_configure_queues(dev) != 0) {
1512 PMD_DRV_LOG(ERR, "configure queues failed");
1515 if (i40evf_config_irq_map(dev)) {
1516 PMD_DRV_LOG(ERR, "config_irq_map failed");
1521 (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
1522 sizeof(mac_addr.addr_bytes));
1523 if (i40evf_add_mac_addr(dev, &mac_addr)) {
1524 PMD_DRV_LOG(ERR, "Failed to add mac addr");
1528 if (i40evf_start_queues(dev) != 0) {
1529 PMD_DRV_LOG(ERR, "enable queues failed");
1533 i40evf_enable_queues_intr(hw);
1537 i40evf_del_mac_addr(dev, &mac_addr);
1543 i40evf_dev_stop(struct rte_eth_dev *dev)
1545 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1547 PMD_INIT_FUNC_TRACE();
1549 i40evf_disable_queues_intr(hw);
1550 i40evf_stop_queues(dev);
1554 i40evf_dev_link_update(struct rte_eth_dev *dev,
1555 __rte_unused int wait_to_complete)
1557 struct rte_eth_link new_link;
1558 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1560 * The DPDK PF host provides an interface to acquire link status,
1561 * while the Linux driver does not
1563 if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1564 i40evf_get_link_status(dev, &new_link);
1566 /* Always assume it's up, for Linux driver PF host */
1567 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1568 new_link.link_speed = ETH_LINK_SPEED_10000;
1569 new_link.link_status = 1;
1571 i40evf_dev_atomic_write_link_status(dev, &new_link);
1577 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1579 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1582 /* If enabled, just return */
1583 if (vf->promisc_unicast_enabled)
1586 ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1588 vf->promisc_unicast_enabled = TRUE;
1592 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1594 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1597 /* If disabled, just return */
1598 if (!vf->promisc_unicast_enabled)
1601 ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1603 vf->promisc_unicast_enabled = FALSE;
1607 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1609 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1612 /* If enabled, just return */
1613 if (vf->promisc_multicast_enabled)
1616 ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1618 vf->promisc_multicast_enabled = TRUE;
1622 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1624 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1627 /* If disabled, just return */
1628 if (!vf->promisc_multicast_enabled)
1631 ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
1633 vf->promisc_multicast_enabled = FALSE;
1637 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1639 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1641 memset(dev_info, 0, sizeof(*dev_info));
1642 dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
1643 dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
1644 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1645 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1646 dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
1647 dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
1649 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1651 .pthresh = I40E_DEFAULT_RX_PTHRESH,
1652 .hthresh = I40E_DEFAULT_RX_HTHRESH,
1653 .wthresh = I40E_DEFAULT_RX_WTHRESH,
1655 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1659 dev_info->default_txconf = (struct rte_eth_txconf) {
1661 .pthresh = I40E_DEFAULT_TX_PTHRESH,
1662 .hthresh = I40E_DEFAULT_TX_HTHRESH,
1663 .wthresh = I40E_DEFAULT_TX_WTHRESH,
1665 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1666 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1667 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1668 ETH_TXQ_FLAGS_NOOFFLOADS,
1673 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1675 if (i40evf_get_statics(dev, stats))
1676 PMD_DRV_LOG(ERR, "Get statistics failed");
1680 i40evf_dev_close(struct rte_eth_dev *dev)
1682 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1684 i40evf_dev_stop(dev);
1685 i40evf_reset_vf(hw);
1686 i40e_shutdown_adminq(hw);
1690 i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
1691 struct rte_eth_rss_reta_entry64 *reta_conf,
1694 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1697 uint16_t idx, shift;
1700 if (reta_size != ETH_RSS_RETA_SIZE_64) {
1701 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1702 "(%d) doesn't match the number of hardware can "
1703 "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
1707 for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1708 idx = i / RTE_RETA_GROUP_SIZE;
1709 shift = i % RTE_RETA_GROUP_SIZE;
1710 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1714 if (mask == I40E_4_BIT_MASK)
1717 l = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
1719 for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
1720 if (mask & (0x1 << j))
1721 lut |= reta_conf[idx].reta[shift + j] <<
1724 lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
1726 I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
1733 i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
1734 struct rte_eth_rss_reta_entry64 *reta_conf,
1737 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1740 uint16_t idx, shift;
1743 if (reta_size != ETH_RSS_RETA_SIZE_64) {
1744 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1745 "(%d) doesn't match the number of hardware can "
1746 "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
1750 for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1751 idx = i / RTE_RETA_GROUP_SIZE;
1752 shift = i % RTE_RETA_GROUP_SIZE;
1753 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1758 lut = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
1759 for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
1760 if (mask & (0x1 << j))
1761 reta_conf[idx].reta[shift + j] =
1762 ((lut >> (CHAR_BIT * j)) &
1771 i40evf_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
1774 uint8_t hash_key_len;
1775 uint64_t rss_hf, hena;
1777 hash_key = (uint32_t *)(rss_conf->rss_key);
1778 hash_key_len = rss_conf->rss_key_len;
1779 if (hash_key != NULL && hash_key_len >=
1780 (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
1783 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1784 I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
1787 rss_hf = rss_conf->rss_hf;
1788 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1789 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1790 hena &= ~I40E_RSS_HENA_ALL;
1791 hena |= i40e_config_hena(rss_hf);
1792 I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
1793 I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
1794 I40EVF_WRITE_FLUSH(hw);
1800 i40evf_disable_rss(struct i40e_vf *vf)
1802 struct i40e_hw *hw = I40E_VF_TO_HW(vf);
1805 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1806 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1807 hena &= ~I40E_RSS_HENA_ALL;
1808 I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
1809 I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
1810 I40EVF_WRITE_FLUSH(hw);
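/*
 * Configure RSS for the VF: fill the hash lookup table (HLUT) with
 * queue indexes in round-robin order over num_queue_pairs, then
 * program the hash key and the enabled hash types. RSS is disabled
 * entirely when the port is not in ETH_MQ_RX_RSS mode or no hash
 * flag is set.
 */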
1814 i40evf_config_rss(struct i40e_vf *vf)
1816 struct i40e_hw *hw = I40E_VF_TO_HW(vf);
1817 struct rte_eth_rss_conf rss_conf;
1818 uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
1820 if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
1821 i40evf_disable_rss(vf);
1822 PMD_DRV_LOG(DEBUG, "RSS not configured\n");
1826 /* Fill out the lookup table */
1827 for (i = 0, j = 0; i < nb_q; i++, j++) {
1828 if (j >= vf->num_queue_pairs)
1830 lut = (lut << 8) | j;
1832 I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
1835 rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
1836 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
1837 i40evf_disable_rss(vf);
1838 PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
1842 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < nb_q) {
1843 /* Generate a random default hash key */
1844 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1845 rss_key_default[i] = (uint32_t)rte_rand();
1846 rss_conf.rss_key = (uint8_t *)rss_key_default;
1847 rss_conf.rss_key_len = nb_q;
1850 return i40evf_hw_rss_hash_set(hw, &rss_conf);
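/*
 * Runtime update of the RSS hash configuration: read VFQF_HENA to see
 * whether RSS is currently enabled before acting on an enable/disable
 * request, then rewrite the hash key and hash types through
 * i40evf_hw_rss_hash_set().
 */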
1854 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
1855 struct rte_eth_rss_conf *rss_conf)
1857 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1858 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
1861 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1862 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1863 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
1864 if (rss_hf != 0) /* Enable RSS */
1870 if (rss_hf == 0) /* Disable RSS */
1873 return i40evf_hw_rss_hash_set(hw, rss_conf);
1877 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1878 struct rte_eth_rss_conf *rss_conf)
1880 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1881 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
1886 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1887 hash_key[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
1888 rss_conf->rss_key_len = i * sizeof(uint32_t);
1890 hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
1891 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
1892 rss_conf->rss_hf = i40e_parse_hena(hena);