4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
46 #include <rte_interrupts.h>
48 #include <rte_debug.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_tailq.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
63 #include "i40e_logs.h"
64 #include "i40e/i40e_prototype.h"
65 #include "i40e/i40e_adminq_cmd.h"
66 #include "i40e/i40e_type.h"
68 #include "i40e_rxtx.h"
69 #include "i40e_ethdev.h"
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR 1
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT 20
78 struct i40evf_arq_msg_info {
79 enum i40e_virtchnl_ops ops;
80 enum i40e_status_code result;
86 enum i40e_virtchnl_ops ops;
88 uint32_t in_args_size;
90 /* Input & output type. pass in buffer size and pass out
91 * actual return result
/* Classification of what was read from the admin queue. */
enum i40evf_aq_result {
	I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
	I40EVF_MSG_NON,      /* Read nothing from admin queue */
	I40EVF_MSG_SYS,      /* Read system msg from admin queue */
	I40EVF_MSG_CMD,      /* Read async command result */
};
103 /* A share buffer to store the command result from PF driver */
104 static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
106 static int i40evf_dev_configure(struct rte_eth_dev *dev);
107 static int i40evf_dev_start(struct rte_eth_dev *dev);
108 static void i40evf_dev_stop(struct rte_eth_dev *dev);
109 static void i40evf_dev_info_get(struct rte_eth_dev *dev,
110 struct rte_eth_dev_info *dev_info);
111 static int i40evf_dev_link_update(struct rte_eth_dev *dev,
112 __rte_unused int wait_to_complete);
113 static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
114 struct rte_eth_stats *stats);
115 static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
116 uint16_t vlan_id, int on);
117 static void i40evf_dev_close(struct rte_eth_dev *dev);
118 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
119 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
120 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
121 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
122 static int i40evf_get_link_status(struct rte_eth_dev *dev,
123 struct rte_eth_link *link);
124 static struct eth_dev_ops i40evf_eth_dev_ops = {
125 .dev_configure = i40evf_dev_configure,
126 .dev_start = i40evf_dev_start,
127 .dev_stop = i40evf_dev_stop,
128 .promiscuous_enable = i40evf_dev_promiscuous_enable,
129 .promiscuous_disable = i40evf_dev_promiscuous_disable,
130 .allmulticast_enable = i40evf_dev_allmulticast_enable,
131 .allmulticast_disable = i40evf_dev_allmulticast_disable,
132 .link_update = i40evf_dev_link_update,
133 .stats_get = i40evf_dev_stats_get,
134 .dev_close = i40evf_dev_close,
135 .dev_infos_get = i40evf_dev_info_get,
136 .vlan_filter_set = i40evf_vlan_filter_set,
137 .rx_queue_setup = i40e_dev_rx_queue_setup,
138 .rx_queue_release = i40e_dev_rx_queue_release,
139 .tx_queue_setup = i40e_dev_tx_queue_setup,
140 .tx_queue_release = i40e_dev_tx_queue_release,
144 i40evf_set_mac_type(struct i40e_hw *hw)
146 int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
148 if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
149 switch (hw->device_id) {
151 case I40E_DEV_ID_VF_HV:
152 hw->mac.type = I40E_MAC_VF;
153 status = I40E_SUCCESS;
164 * Parse admin queue message.
169 * > 0: read cmd result
171 static enum i40evf_aq_result
172 i40evf_parse_pfmsg(struct i40e_vf *vf,
173 struct i40e_arq_event_info *event,
174 struct i40evf_arq_msg_info *data)
176 enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
177 rte_le_to_cpu_32(event->desc.cookie_high);
178 enum i40e_status_code retval = (enum i40e_status_code)\
179 rte_le_to_cpu_32(event->desc.cookie_low);
180 enum i40evf_aq_result ret = I40EVF_MSG_CMD;
183 if (opcode == I40E_VIRTCHNL_OP_EVENT) {
184 struct i40e_virtchnl_pf_event *vpe =
185 (struct i40e_virtchnl_pf_event *)event->msg_buf;
187 /* Initialize ret to sys event */
188 ret = I40EVF_MSG_SYS;
189 switch (vpe->event) {
190 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
192 vpe->event_data.link_event.link_status;
193 vf->pend_msg |= PFMSG_LINK_CHANGE;
194 PMD_DRV_LOG(INFO, "Link status update:%s\n",
195 vf->link_up ? "up" : "down");
197 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
199 vf->pend_msg |= PFMSG_RESET_IMPENDING;
200 PMD_DRV_LOG(INFO, "vf is reseting\n");
202 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
203 vf->dev_closed = true;
204 vf->pend_msg |= PFMSG_DRIVER_CLOSE;
205 PMD_DRV_LOG(INFO, "PF driver closed\n");
209 "%s: Unknown event %d from pf\n",
210 __func__, vpe->event);
213 /* async reply msg on command issued by vf previously */
214 ret = I40EVF_MSG_CMD;
215 /* Actual buffer length read from PF */
216 data->msg_len = event->msg_size;
218 /* fill the ops and result to notify VF */
219 data->result = retval;
226 * Read data in admin queue to get msg from pf driver
228 static enum i40evf_aq_result
229 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
231 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
232 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
233 struct i40e_arq_event_info event;
235 enum i40evf_aq_result result = I40EVF_MSG_NON;
237 event.msg_size = data->msg_len;
238 event.msg_buf = data->msg;
239 ret = i40e_clean_arq_element(hw, &event, NULL);
240 /* Can't read any msg from adminQ */
242 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
243 result = I40EVF_MSG_NON;
245 result = I40EVF_MSG_ERR;
249 /* Parse the event */
250 result = i40evf_parse_pfmsg(vf, &event, data);
256 * Polling read until command result return from pf driver or meet error.
259 i40evf_wait_cmd_done(struct rte_eth_dev *dev,
260 struct i40evf_arq_msg_info *data)
263 enum i40evf_aq_result ret;
265 #define MAX_TRY_TIMES 10
266 #define ASQ_DELAY_MS 50
268 /* Delay some time first */
269 rte_delay_ms(ASQ_DELAY_MS);
270 ret = i40evf_read_pfmsg(dev, data);
272 if (ret == I40EVF_MSG_CMD)
274 else if (ret == I40EVF_MSG_ERR)
277 /* If don't read msg or read sys event, continue */
278 } while(i++ < MAX_TRY_TIMES);
284 * clear current command. Only call in case execute
285 * _atomic_set_cmd successfully.
288 _clear_cmd(struct i40e_vf *vf)
291 vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
295 * Check there is pending cmd in execution. If none, set new command.
298 _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
300 int ret = rte_atomic32_cmpset(&vf->pend_cmd,
301 I40E_VIRTCHNL_OP_UNKNOWN, ops);
304 PMD_DRV_LOG(ERR, "There is incomplete cmd %d\n", vf->pend_cmd);
310 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
312 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
313 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
315 struct i40evf_arq_msg_info info;
317 if (_atomic_set_cmd(vf, args->ops))
320 info.msg = args->out_buffer;
321 info.msg_len = args->out_size;
322 info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
323 info.result = I40E_SUCCESS;
325 err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
326 args->in_args, args->in_args_size, NULL);
328 PMD_DRV_LOG(ERR, "fail to send cmd %d\n", args->ops);
332 err = i40evf_wait_cmd_done(dev, &info);
333 /* read message and it's expected one */
334 if (!err && args->ops == info.ops)
337 PMD_DRV_LOG(ERR, "Failed to read message from AdminQ\n");
338 else if (args->ops != info.ops)
339 PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u\n",
340 args->ops, info.ops);
342 return (err | info.result);
346 * Check API version with sync wait until version read or fail from admin queue
349 i40evf_check_api_version(struct rte_eth_dev *dev)
351 struct i40e_virtchnl_version_info version, *pver;
353 struct vf_cmd_info args;
354 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
356 version.major = I40E_VIRTCHNL_VERSION_MAJOR;
357 version.minor = I40E_VIRTCHNL_VERSION_MINOR;
359 args.ops = I40E_VIRTCHNL_OP_VERSION;
360 args.in_args = (uint8_t *)&version;
361 args.in_args_size = sizeof(version);
362 args.out_buffer = cmd_result_buffer;
363 args.out_size = I40E_AQ_BUF_SZ;
365 err = i40evf_execute_vf_cmd(dev, &args);
367 PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION\n");
371 pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
372 /* We are talking with DPDK host */
373 if (pver->major == I40E_DPDK_VERSION_MAJOR) {
374 vf->host_is_dpdk = TRUE;
375 PMD_DRV_LOG(INFO, "Detect PF host is DPDK app\n");
377 /* It's linux host driver */
378 else if ((pver->major != version.major) ||
379 (pver->minor != version.minor)) {
380 PMD_INIT_LOG(ERR, "pf/vf API version mismatch. "
381 "(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
382 version.major, version.minor);
390 i40evf_get_vf_resource(struct rte_eth_dev *dev)
392 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
393 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
395 struct vf_cmd_info args;
398 args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
400 args.in_args_size = 0;
401 args.out_buffer = cmd_result_buffer;
402 args.out_size = I40E_AQ_BUF_SZ;
404 err = i40evf_execute_vf_cmd(dev, &args);
407 PMD_DRV_LOG(ERR, "fail to execute command "
408 "OP_GET_VF_RESOURCE\n");
412 len = sizeof(struct i40e_virtchnl_vf_resource) +
413 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
415 (void)rte_memcpy(vf->vf_res, args.out_buffer,
416 RTE_MIN(args.out_size, len));
417 i40e_vf_parse_hw_config(hw, vf->vf_res);
423 i40evf_config_promisc(struct rte_eth_dev *dev,
425 bool enable_multicast)
427 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
429 struct vf_cmd_info args;
430 struct i40e_virtchnl_promisc_info promisc;
433 promisc.vsi_id = vf->vsi_res->vsi_id;
436 promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
438 if (enable_multicast)
439 promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
441 args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
442 args.in_args = (uint8_t *)&promisc;
443 args.in_args_size = sizeof(promisc);
444 args.out_buffer = cmd_result_buffer;
445 args.out_size = I40E_AQ_BUF_SZ;
447 err = i40evf_execute_vf_cmd(dev, &args);
450 PMD_DRV_LOG(ERR, "fail to execute command "
451 "CONFIG_PROMISCUOUS_MODE\n");
457 i40evf_configure_queues(struct rte_eth_dev *dev)
459 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
460 struct i40e_virtchnl_vsi_queue_config_info *queue_info;
461 struct i40e_virtchnl_queue_pair_info *queue_cfg;
462 struct i40e_rx_queue **rxq =
463 (struct i40e_rx_queue **)dev->data->rx_queues;
464 struct i40e_tx_queue **txq =
465 (struct i40e_tx_queue **)dev->data->tx_queues;
466 int i, len, nb_qpairs, num_rxq, num_txq;
468 struct vf_cmd_info args;
469 struct rte_pktmbuf_pool_private *mbp_priv;
471 nb_qpairs = vf->num_queue_pairs;
472 len = sizeof(*queue_info) + sizeof(*queue_cfg) * nb_qpairs;
473 queue_info = rte_zmalloc("queue_info", len, 0);
474 if (queue_info == NULL) {
475 PMD_INIT_LOG(ERR, "failed alloc memory for queue_info\n");
478 queue_info->vsi_id = vf->vsi_res->vsi_id;
479 queue_info->num_queue_pairs = nb_qpairs;
480 queue_cfg = queue_info->qpair;
482 num_rxq = dev->data->nb_rx_queues;
483 num_txq = dev->data->nb_tx_queues;
485 * PF host driver required to configure queues in pairs, which means
486 * rxq_num should equals to txq_num. The actual usage won't always
487 * work that way. The solution is fills 0 with HW ring option in case
488 * they are not equal.
490 for (i = 0; i < nb_qpairs; i++) {
492 queue_cfg->txq.vsi_id = queue_info->vsi_id;
493 queue_cfg->txq.queue_id = i;
495 queue_cfg->txq.ring_len = txq[i]->nb_tx_desc;
496 queue_cfg->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
498 queue_cfg->txq.ring_len = 0;
499 queue_cfg->txq.dma_ring_addr = 0;
503 queue_cfg->rxq.vsi_id = queue_info->vsi_id;
504 queue_cfg->rxq.queue_id = i;
505 queue_cfg->rxq.max_pkt_size = vf->max_pkt_len;
507 mbp_priv = rte_mempool_get_priv(rxq[i]->mp);
508 queue_cfg->rxq.databuffer_size = mbp_priv->mbuf_data_room_size -
509 RTE_PKTMBUF_HEADROOM;;
510 queue_cfg->rxq.ring_len = rxq[i]->nb_rx_desc;
511 queue_cfg->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;;
513 queue_cfg->rxq.ring_len = 0;
514 queue_cfg->rxq.dma_ring_addr = 0;
515 queue_cfg->rxq.databuffer_size = 0;
520 args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
521 args.in_args = (u8 *)queue_info;
522 args.in_args_size = len;
523 args.out_buffer = cmd_result_buffer;
524 args.out_size = I40E_AQ_BUF_SZ;
525 err = i40evf_execute_vf_cmd(dev, &args);
527 PMD_DRV_LOG(ERR, "fail to execute command "
528 "OP_CONFIG_VSI_QUEUES\n");
529 rte_free(queue_info);
535 i40evf_config_irq_map(struct rte_eth_dev *dev)
537 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
538 struct vf_cmd_info args;
539 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
540 sizeof(struct i40e_virtchnl_vector_map)];
541 struct i40e_virtchnl_irq_map_info *map_info;
543 map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
544 map_info->num_vectors = 1;
545 map_info->vecmap[0].rxitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
546 map_info->vecmap[0].txitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
547 map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
548 /* Alway use default dynamic MSIX interrupt */
549 map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
550 /* Don't map any tx queue */
551 map_info->vecmap[0].txq_map = 0;
552 map_info->vecmap[0].rxq_map = 0;
553 for (i = 0; i < dev->data->nb_rx_queues; i++)
554 map_info->vecmap[0].rxq_map |= 1 << i;
556 args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
557 args.in_args = (u8 *)cmd_buffer;
558 args.in_args_size = sizeof(cmd_buffer);
559 args.out_buffer = cmd_result_buffer;
560 args.out_size = I40E_AQ_BUF_SZ;
561 err = i40evf_execute_vf_cmd(dev, &args);
563 PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES\n");
569 i40evf_enable_queues(struct rte_eth_dev *dev)
571 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
572 struct i40e_virtchnl_queue_select queue_select;
574 struct vf_cmd_info args;
576 queue_select.vsi_id = vf->vsi_res->vsi_id;
578 queue_select.rx_queues = 0;
579 /* Enable configured RX queues */
580 for (i = 0; i < dev->data->nb_rx_queues; i++)
581 queue_select.rx_queues |= 1 << i;
583 /* Enable configured TX queues */
584 queue_select.tx_queues = 0;
585 for (i = 0; i < dev->data->nb_tx_queues; i++)
586 queue_select.tx_queues |= 1 << i;
588 args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
589 args.in_args = (u8 *)&queue_select;
590 args.in_args_size = sizeof(queue_select);
591 args.out_buffer = cmd_result_buffer;
592 args.out_size = I40E_AQ_BUF_SZ;
593 err = i40evf_execute_vf_cmd(dev, &args);
595 PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES\n");
601 i40evf_disable_queues(struct rte_eth_dev *dev)
603 struct i40e_virtchnl_queue_select queue_select;
605 struct vf_cmd_info args;
607 /* Enable configured RX queues */
608 queue_select.rx_queues = 0;
609 for (i = 0; i < dev->data->nb_rx_queues; i++)
610 queue_select.rx_queues |= 1 << i;
612 /* Enable configured TX queues */
613 queue_select.tx_queues = 0;
614 for (i = 0; i < dev->data->nb_tx_queues; i++)
615 queue_select.tx_queues |= 1 << i;
617 args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
618 args.in_args = (u8 *)&queue_select;
619 args.in_args_size = sizeof(queue_select);
620 args.out_buffer = cmd_result_buffer;
621 args.out_size = I40E_AQ_BUF_SZ;
622 err = i40evf_execute_vf_cmd(dev, &args);
624 PMD_DRV_LOG(ERR, "fail to execute command "
625 "OP_DISABLE_QUEUES\n");
631 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
633 struct i40e_virtchnl_ether_addr_list *list;
634 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
635 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
636 sizeof(struct i40e_virtchnl_ether_addr)];
638 struct vf_cmd_info args;
640 if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
641 PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x\n",
642 addr->addr_bytes[0], addr->addr_bytes[1],
643 addr->addr_bytes[2], addr->addr_bytes[3],
644 addr->addr_bytes[4], addr->addr_bytes[5]);
648 list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
649 list->vsi_id = vf->vsi_res->vsi_id;
650 list->num_elements = 1;
651 (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
652 sizeof(addr->addr_bytes));
654 args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
655 args.in_args = cmd_buffer;
656 args.in_args_size = sizeof(cmd_buffer);
657 args.out_buffer = cmd_result_buffer;
658 args.out_size = I40E_AQ_BUF_SZ;
659 err = i40evf_execute_vf_cmd(dev, &args);
661 PMD_DRV_LOG(ERR, "fail to execute command "
662 "OP_ADD_ETHER_ADDRESS\n");
668 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
670 struct i40e_virtchnl_ether_addr_list *list;
671 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
672 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
673 sizeof(struct i40e_virtchnl_ether_addr)];
675 struct vf_cmd_info args;
677 if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
678 PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x\n",
679 addr->addr_bytes[0], addr->addr_bytes[1],
680 addr->addr_bytes[2], addr->addr_bytes[3],
681 addr->addr_bytes[4], addr->addr_bytes[5]);
685 list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
686 list->vsi_id = vf->vsi_res->vsi_id;
687 list->num_elements = 1;
688 (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
689 sizeof(addr->addr_bytes));
691 args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
692 args.in_args = cmd_buffer;
693 args.in_args_size = sizeof(cmd_buffer);
694 args.out_buffer = cmd_result_buffer;
695 args.out_size = I40E_AQ_BUF_SZ;
696 err = i40evf_execute_vf_cmd(dev, &args);
698 PMD_DRV_LOG(ERR, "fail to execute command "
699 "OP_DEL_ETHER_ADDRESS\n");
705 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
707 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
708 struct i40e_virtchnl_queue_select q_stats;
709 struct i40e_eth_stats *pstats;
711 struct vf_cmd_info args;
713 memset(&q_stats, 0, sizeof(q_stats));
714 q_stats.vsi_id = vf->vsi_res->vsi_id;
715 args.ops = I40E_VIRTCHNL_OP_GET_STATS;
716 args.in_args = (u8 *)&q_stats;
717 args.in_args_size = sizeof(q_stats);
718 args.out_buffer = cmd_result_buffer;
719 args.out_size = I40E_AQ_BUF_SZ;
721 err = i40evf_execute_vf_cmd(dev, &args);
723 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS\n");
726 pstats = (struct i40e_eth_stats *)args.out_buffer;
727 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
728 pstats->rx_broadcast;
729 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
731 stats->ierrors = pstats->rx_discards;
732 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
733 stats->ibytes = pstats->rx_bytes;
734 stats->obytes = pstats->tx_bytes;
740 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
742 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
743 struct i40e_virtchnl_vlan_filter_list *vlan_list;
744 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
747 struct vf_cmd_info args;
749 vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
750 vlan_list->vsi_id = vf->vsi_res->vsi_id;
751 vlan_list->num_elements = 1;
752 vlan_list->vlan_id[0] = vlanid;
754 args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
755 args.in_args = (u8 *)&cmd_buffer;
756 args.in_args_size = sizeof(cmd_buffer);
757 args.out_buffer = cmd_result_buffer;
758 args.out_size = I40E_AQ_BUF_SZ;
759 err = i40evf_execute_vf_cmd(dev, &args);
761 PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN\n");
767 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
769 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
770 struct i40e_virtchnl_vlan_filter_list *vlan_list;
771 uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
774 struct vf_cmd_info args;
776 vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
777 vlan_list->vsi_id = vf->vsi_res->vsi_id;
778 vlan_list->num_elements = 1;
779 vlan_list->vlan_id[0] = vlanid;
781 args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
782 args.in_args = (u8 *)&cmd_buffer;
783 args.in_args_size = sizeof(cmd_buffer);
784 args.out_buffer = cmd_result_buffer;
785 args.out_size = I40E_AQ_BUF_SZ;
786 err = i40evf_execute_vf_cmd(dev, &args);
788 PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN\n");
794 i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
797 struct vf_cmd_info args;
798 struct rte_eth_link *new_link;
800 args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
802 args.in_args_size = 0;
803 args.out_buffer = cmd_result_buffer;
804 args.out_size = I40E_AQ_BUF_SZ;
805 err = i40evf_execute_vf_cmd(dev, &args);
807 PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT\n");
811 new_link = (struct rte_eth_link *)args.out_buffer;
812 (void)rte_memcpy(link, new_link, sizeof(link));
817 static struct rte_pci_id pci_id_i40evf_map[] = {
818 #define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
819 #include "rte_pci_dev_ids.h"
820 { .vendor_id = 0, /* sentinel */ },
824 i40evf_dev_atomic_read_link_status(struct rte_eth_dev *dev,
825 struct rte_eth_link *link)
827 struct rte_eth_link *dst = link;
828 struct rte_eth_link *src = &(dev->data->dev_link);
830 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
831 *(uint64_t *)src) == 0)
838 i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
839 struct rte_eth_link *link)
841 struct rte_eth_link *dst = &(dev->data->dev_link);
842 struct rte_eth_link *src = link;
844 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
845 *(uint64_t *)src) == 0)
852 i40evf_reset_vf(struct i40e_hw *hw)
856 if (i40e_vf_reset(hw) != I40E_SUCCESS) {
857 PMD_INIT_LOG(ERR, "Reset VF NIC failed\n");
861 * After issuing vf reset command to pf, pf won't necessarily
862 * reset vf, it depends on what state it exactly is. If it's not
863 * initialized yet, it won't have vf reset since it's in a certain
864 * state. If not, it will try to reset. Even vf is reset, pf will
865 * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
866 * it to ACTIVE. In this duration, vf may not catch the moment that
867 * COMPLETE is set. So, for vf, we'll try to wait a long time.
871 for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
872 reset = rd32(hw, I40E_VFGEN_RSTAT) &
873 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
874 reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
875 if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
881 if (i >= MAX_RESET_WAIT_CNT) {
882 PMD_INIT_LOG(ERR, "Reset VF NIC failed\n");
890 i40evf_init_vf(struct rte_eth_dev *dev)
893 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
894 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
896 err = i40evf_set_mac_type(hw);
898 PMD_INIT_LOG(ERR, "set_mac_type failed: %d\n", err);
902 i40e_init_adminq_parameter(hw);
903 err = i40e_init_adminq(hw);
905 PMD_INIT_LOG(ERR, "init_adminq failed: %d\n", err);
910 /* Reset VF and wait until it's complete */
911 if (i40evf_reset_vf(hw)) {
912 PMD_INIT_LOG(ERR, "reset NIC failed\n");
916 /* VF reset, shutdown admin queue and initialize again */
917 if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
918 PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed\n");
922 i40e_init_adminq_parameter(hw);
923 if (i40e_init_adminq(hw) != I40E_SUCCESS) {
924 PMD_INIT_LOG(ERR, "init_adminq failed\n");
927 if (i40evf_check_api_version(dev) != 0) {
928 PMD_INIT_LOG(ERR, "check_api version failed\n");
931 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
932 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
933 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
935 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory\n");
939 if (i40evf_get_vf_resource(dev) != 0) {
940 PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed\n");
944 /* got VF config message back from PF, now we can parse it */
945 for (i = 0; i < vf->vf_res->num_vsis; i++) {
946 if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
947 vf->vsi_res = &vf->vf_res->vsi_res[i];
951 PMD_INIT_LOG(ERR, "no LAN VSI found\n");
955 vf->vsi.vsi_id = vf->vsi_res->vsi_id;
956 vf->vsi.type = vf->vsi_res->vsi_type;
957 vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
959 /* check mac addr, if it's not valid, genrate one */
960 if (I40E_SUCCESS != i40e_validate_mac_addr(\
961 vf->vsi_res->default_mac_addr))
962 eth_random_addr(vf->vsi_res->default_mac_addr);
964 ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
965 (struct ether_addr *)hw->mac.addr);
970 rte_free(vf->vf_res);
972 i40e_shutdown_adminq(hw); /* ignore error */
978 i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
979 struct rte_eth_dev *eth_dev)
981 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
982 eth_dev->data->dev_private);
984 PMD_INIT_FUNC_TRACE();
986 /* assign ops func pointer */
987 eth_dev->dev_ops = &i40evf_eth_dev_ops;
988 eth_dev->rx_pkt_burst = &i40e_recv_pkts;
989 eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
992 * For secondary processes, we don't initialise any further as primary
993 * has already done this work.
995 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
996 if (eth_dev->data->scattered_rx)
997 eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
1001 hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
1002 hw->device_id = eth_dev->pci_dev->id.device_id;
1003 hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
1004 hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
1005 hw->bus.device = eth_dev->pci_dev->addr.devid;
1006 hw->bus.func = eth_dev->pci_dev->addr.function;
1007 hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
1009 if(i40evf_init_vf(eth_dev) != 0) {
1010 PMD_INIT_LOG(ERR, "Init vf failed\n");
1015 eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
1017 if (eth_dev->data->mac_addrs == NULL) {
1018 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
1019 "store MAC addresses", ETHER_ADDR_LEN);
1022 ether_addr_copy((struct ether_addr *)hw->mac.addr,
1023 (struct ether_addr *)eth_dev->data->mac_addrs);
1029 * virtual function driver struct
1031 static struct eth_driver rte_i40evf_pmd = {
1033 .name = "rte_i40evf_pmd",
1034 .id_table = pci_id_i40evf_map,
1035 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1037 .eth_dev_init = i40evf_dev_init,
1038 .dev_private_size = sizeof(struct i40e_vf),
1042 * VF Driver initialization routine.
1043 * Invoked one at EAL init time.
1044 * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
1047 rte_i40evf_pmd_init(const char *name __rte_unused,
1048 const char *params __rte_unused)
1050 DEBUGFUNC("rte_i40evf_pmd_init");
1052 rte_eth_driver_register(&rte_i40evf_pmd);
1057 static struct rte_driver rte_i40evf_driver = {
1059 .init = rte_i40evf_pmd_init,
1062 PMD_REGISTER_DRIVER(rte_i40evf_driver);
1065 i40evf_dev_configure(__rte_unused struct rte_eth_dev *dev)
1071 i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1076 ret = i40evf_add_vlan(dev, vlan_id);
1078 ret = i40evf_del_vlan(dev,vlan_id);
1084 i40evf_rx_init(struct rte_eth_dev *dev)
1087 struct i40e_rx_queue **rxq =
1088 (struct i40e_rx_queue **)dev->data->rx_queues;
1089 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1091 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1092 if (i40e_alloc_rx_queue_mbufs(rxq[i]) != 0) {
1093 PMD_DRV_LOG(ERR, "alloc rx queues mbufs failed\n");
1096 rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
1097 I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
1100 /* Flush the operation to write registers */
1101 I40EVF_WRITE_FLUSH(hw);
1106 /* Release all mbufs */
1107 for (j = 0; j < i; j++)
1108 i40e_rx_queue_release_mbufs(rxq[j]);
1114 i40evf_tx_init(struct rte_eth_dev *dev)
1117 struct i40e_tx_queue **txq =
1118 (struct i40e_tx_queue **)dev->data->tx_queues;
1119 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1121 for (i = 0; i < dev->data->nb_tx_queues; i++)
1122 txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1126 i40evf_enable_queues_intr(struct i40e_hw *hw)
1128 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1129 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1130 I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1134 i40evf_disable_queues_intr(struct i40e_hw *hw)
1136 I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1141 i40evf_dev_start(struct rte_eth_dev *dev)
1143 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1144 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1145 struct ether_addr mac_addr;
1147 PMD_DRV_LOG(DEBUG, "i40evf_dev_start");
1149 vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1150 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1151 if (vf->max_pkt_len <= ETHER_MAX_LEN ||
1152 vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
1153 PMD_DRV_LOG(ERR, "maximum packet length must "
1154 "be larger than %u and smaller than %u,"
1155 "as jumbo frame is enabled\n",
1156 (uint32_t)ETHER_MAX_LEN,
1157 (uint32_t)I40E_FRAME_SIZE_MAX);
1158 return I40E_ERR_CONFIG;
1161 if (vf->max_pkt_len < ETHER_MIN_LEN ||
1162 vf->max_pkt_len > ETHER_MAX_LEN) {
1163 PMD_DRV_LOG(ERR, "maximum packet length must be "
1164 "larger than %u and smaller than %u, "
1165 "as jumbo frame is disabled\n",
1166 (uint32_t)ETHER_MIN_LEN,
1167 (uint32_t)ETHER_MAX_LEN);
1168 return I40E_ERR_CONFIG;
1172 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
1173 dev->data->nb_tx_queues);
1175 if (i40evf_rx_init(dev) != 0){
1176 PMD_DRV_LOG(ERR, "failed to do RX init\n");
1180 i40evf_tx_init(dev);
1182 if (i40evf_configure_queues(dev) != 0) {
1183 PMD_DRV_LOG(ERR, "configure queues failed\n");
1186 if (i40evf_config_irq_map(dev)) {
1187 PMD_DRV_LOG(ERR, "config_irq_map failed\n");
1192 (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
1193 sizeof(mac_addr.addr_bytes));
1194 if (i40evf_add_mac_addr(dev, &mac_addr)) {
1195 PMD_DRV_LOG(ERR, "Failed to add mac addr\n");
1199 if (i40evf_enable_queues(dev) != 0) {
1200 PMD_DRV_LOG(ERR, "enable queues failed\n");
1203 i40evf_enable_queues_intr(hw);
1207 i40evf_del_mac_addr(dev, &mac_addr);
1209 i40e_dev_clear_queues(dev);
1214 i40evf_dev_stop(struct rte_eth_dev *dev)
1216 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1218 PMD_INIT_FUNC_TRACE();
1220 i40evf_disable_queues_intr(hw);
1221 i40evf_disable_queues(dev);
1222 i40e_dev_clear_queues(dev);
1226 i40evf_dev_link_update(struct rte_eth_dev *dev,
1227 __rte_unused int wait_to_complete)
1229 struct rte_eth_link new_link;
1230 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1232 * DPDK pf host provide interfacet to acquire link status
1233 * while Linux driver does not
1235 if (vf->host_is_dpdk)
1236 i40evf_get_link_status(dev, &new_link);
1238 /* Always assume it's up, for Linux driver PF host */
1239 new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
1240 new_link.link_speed = ETH_LINK_SPEED_10000;
1241 new_link.link_status = 1;
1243 i40evf_dev_atomic_write_link_status(dev, &new_link);
1249 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1251 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1254 /* If enabled, just return */
1255 if (vf->promisc_unicast_enabled)
1258 ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
1260 vf->promisc_unicast_enabled = TRUE;
1264 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1266 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1269 /* If disabled, just return */
1270 if (!vf->promisc_unicast_enabled)
1273 ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
1275 vf->promisc_unicast_enabled = FALSE;
1279 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1281 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1284 /* If enabled, just return */
1285 if (vf->promisc_multicast_enabled)
1288 ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
1290 vf->promisc_multicast_enabled = TRUE;
1294 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1296 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1299 /* If enabled, just return */
1300 if (!vf->promisc_multicast_enabled)
1303 ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
1305 vf->promisc_multicast_enabled = FALSE;
1309 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1311 struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1313 memset(dev_info, 0, sizeof(*dev_info));
1314 dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
1315 dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
1316 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1317 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1321 i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1323 memset(stats, 0, sizeof(*stats));
1324 if (i40evf_get_statics(dev, stats))
1325 PMD_DRV_LOG(ERR, "Get statics failed\n");
1329 i40evf_dev_close(struct rte_eth_dev *dev)
1331 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1333 i40evf_dev_stop(dev);
1334 i40evf_reset_vf(hw);
1335 i40e_shutdown_adminq(hw);