/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_atomic.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#include "ice_dcf.h"

#define ICE_DCF_AQ_LEN		32
#define ICE_DCF_AQ_BUF_SZ	4096

#define ICE_DCF_ARQ_MAX_RETRIES	200
#define ICE_DCF_ARQ_CHECK_TIME	2	/* msecs */

#define ICE_DCF_VF_RES_BUF_SZ	\
	(sizeof(struct virtchnl_vf_resource) +	\
	IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))
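
/* Send a virtchnl request to the PF over the AdminQ without relying on the
 * interrupt handler; used during early init before IRQ0 is set up.
 */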
static __rte_always_inline int
ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *req_msg, uint16_t req_msglen)
{
	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
				      req_msg, req_msglen, NULL);
}
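
/* Poll the AdminQ for the response to the given virtchnl op and copy the
 * payload into rsp_msgbuf, retrying up to ICE_DCF_ARQ_MAX_RETRIES times.
 */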
static __rte_always_inline int
ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
			    uint16_t *rsp_msglen)
{
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	int i = 0;
	int err;

	event.buf_len = rsp_buflen;
	event.msg_buf = rsp_msgbuf;

	do {
		err = iavf_clean_arq_element(&hw->avf, &event, NULL);
		if (err != IAVF_SUCCESS)
			goto again;

		v_op = rte_le_to_cpu_32(event.desc.cookie_high);
		if (v_op != op)
			goto again;

		if (rsp_msglen != NULL)
			*rsp_msglen = event.msg_len;
		return rte_le_to_cpu_32(event.desc.cookie_low);

again:
		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	return -EIO;
}

static __rte_always_inline void
ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

static __rte_always_inline void
ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	cmd->v_ret = IAVF_ERR_NOT_READY;
	cmd->rsp_msglen = 0;
	cmd->pending = 1;

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

static __rte_always_inline int
ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	return iavf_aq_send_msg_to_pf(&hw->avf,
				      cmd->v_op, IAVF_SUCCESS,
				      cmd->req_msg, cmd->req_msglen, NULL);
}
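
/* Dispatch one AdminQ event: forward VIRTCHNL_OP_EVENT messages to the
 * registered event callback, otherwise match the message against the pending
 * command queue and complete the matching command.
 */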
static __rte_always_inline void
ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
{
	struct dcf_virtchnl_cmd *cmd;
	enum virtchnl_ops v_op;
	enum iavf_status v_ret;
	uint16_t aq_op;

	aq_op = rte_le_to_cpu_16(info->desc.opcode);
	if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
		PMD_DRV_LOG(ERR,
			    "Request %u is not supported yet", aq_op);
		return;
	}

	v_op = rte_le_to_cpu_32(info->desc.cookie_high);
	if (v_op == VIRTCHNL_OP_EVENT) {
		if (hw->vc_event_msg_cb != NULL)
			hw->vc_event_msg_cb(hw,
					    info->msg_buf,
					    info->msg_len);
		return;
	}

	v_ret = rte_le_to_cpu_32(info->desc.cookie_low);

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
		if (cmd->v_op == v_op && cmd->pending) {
			cmd->v_ret = v_ret;
			cmd->rsp_msglen = RTE_MIN(info->msg_len,
						  cmd->rsp_buflen);
			if (likely(cmd->rsp_msglen != 0))
				rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
					   cmd->rsp_msglen);

			/* prevent compiler reordering */
			rte_compiler_barrier();
			cmd->pending = 0;
			break;
		}
	}

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}
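
/* Drain all pending AdminQ events and dispatch each of them. Called from the
 * interrupt handler with IRQ0 masked.
 */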
static void
ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
{
	struct iavf_arq_event_info info;
	uint16_t pending = 1;
	int ret;

	info.buf_len = ICE_DCF_AQ_BUF_SZ;
	info.msg_buf = hw->arq_buf;

	while (pending && !hw->resetting) {
		ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
		if (ret != IAVF_SUCCESS)
			break;

		ice_dcf_aq_cmd_handle(hw, &info);
	}
}
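
/* Negotiate the virtchnl API version with the PF host and verify that it
 * falls within the range supported by the DCF.
 */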
static int
ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
{
#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START	1
#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START	1
	struct virtchnl_version_info version, *pver;
	int err;

	version.major = VIRTCHNL_VERSION_MAJOR;
	version.minor = VIRTCHNL_VERSION_MINOR;
	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)&version, sizeof(version));
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
		return err;
	}

	pver = &hw->virtchnl_version;
	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)pver, sizeof(*pver), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
		return -1;
	}

	PMD_INIT_LOG(DEBUG,
		     "Peer PF API version: %u.%u", pver->major, pver->minor);

	if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
	    (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
	     pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
		PMD_INIT_LOG(ERR,
			     "VIRTCHNL API version should not be lower than (%u.%u)",
			     ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
			     ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
		return -1;
	} else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
		   (pver->major == VIRTCHNL_VERSION_MAJOR &&
		    pver->minor > VIRTCHNL_VERSION_MINOR)) {
		PMD_INIT_LOG(ERR,
			     "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
			     pver->major, pver->minor,
			     VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
		return -1;
	}

	PMD_INIT_LOG(DEBUG, "Peer PF host is supported");

	return 0;
}
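
/* Request the VF resources (capabilities, VSIs, queue/RSS sizes) from the PF
 * and locate the LAN VSI used by the DCF.
 */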
static int
ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
{
	uint32_t caps;
	int err, i;

	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
	       VIRTCHNL_VF_OFFLOAD_QOS;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)&caps, sizeof(caps));
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)hw->vf_res,
					  ICE_DCF_VF_RES_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
		return -1;
	}

	iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);

	for (i = 0; i < hw->vf_res->num_vsis; i++) {
		if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			hw->vsi_res = &hw->vf_res->vsi_res[i];
	}

	if (!hw->vsi_res) {
		PMD_DRV_LOG(ERR, "no LAN VSI found");
		return -1;
	}

	hw->vsi_id = hw->vsi_res->vsi_id;
	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);

	return 0;
}
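
/* Retrieve the VF ID to VSI ID mapping from the PF. Returns a positive value
 * when the map is unchanged, 0 when it was updated, negative on error.
 */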
static int
ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_dcf_vsi_map *vsi_map;
	uint32_t valid_msg_len;
	uint16_t len;
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
					  &len);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
		return err;
	}

	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
	valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
			sizeof(*vsi_map);
	if (len != valid_msg_len) {
		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
			    len);
		return -EINVAL;
	}

	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
		PMD_DRV_LOG(ERR, "The number of VSI map entries (%u) doesn't match the number of VFs (%u)",
			    vsi_map->num_vfs, hw->num_vfs);
		return -EINVAL;
	}

	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
	if (!hw->vf_vsi_map) {
		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
		if (!hw->vf_vsi_map) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
			return -ENOMEM;
		}

		hw->num_vfs = vsi_map->num_vfs;
		hw->pf_vsi_id = vsi_map->pf_vsi;
	}

	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
		return 1;
	}

	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);

	return 0;
}

static int
ice_dcf_mode_disable(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR,
			    "Failed to get response of OP_DCF_DISABLE %d",
			    err);
		return -1;
	}

	return 0;
}

static int
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
{
#define ICE_DCF_RESET_WAIT_CNT	50
	struct iavf_hw *avf = &hw->avf;
	uint32_t reset;
	int i;

	for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
		reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
					IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;

		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;

		rte_delay_ms(20);
	}

	if (i >= ICE_DCF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

static inline void
ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

static inline void
ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Disable all interrupt types */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

static void
ice_dcf_dev_interrupt_handler(void *param)
{
	struct ice_dcf_hw *hw = param;

	ice_dcf_disable_irq0(hw);

	ice_dcf_handle_virtchnl_msg(hw);

	ice_dcf_enable_irq0(hw);
}
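
/* Send a virtchnl command and busy-wait (with the command send lock held)
 * until the interrupt handler marks it completed or the retry budget is
 * exhausted.
 */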
int
ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
			     struct dcf_virtchnl_cmd *cmd)
{
	int i = 0;
	int err;

	if ((cmd->req_msg && !cmd->req_msglen) ||
	    (!cmd->req_msg && cmd->req_msglen) ||
	    (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
	    (!cmd->rsp_msgbuf && cmd->rsp_buflen))
		return -EINVAL;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, cmd);

	err = ice_dcf_vc_cmd_send(hw, cmd);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
		goto ret;
	}

	do {
		if (!cmd->pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (cmd->v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (%d) for cmd %d",
			    i, cmd->v_ret, cmd->v_op);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}
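
/* Forward an ice AdminQ descriptor (and optional indirect buffer) to the PF
 * as a VIRTCHNL_OP_DCF_CMD_DESC/VIRTCHNL_OP_DCF_CMD_BUFF pair and wait for
 * both messages to complete.
 */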
int
ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
		    void *buf, uint16_t buf_size)
{
	struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
	struct ice_dcf_hw *hw = dcf_hw;
	int err = 0;
	int i = 0;

	if ((buf && !buf_size) || (!buf && buf_size) ||
	    buf_size > ICE_DCF_AQ_BUF_SZ)
		return -EINVAL;

	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
	desc_cmd.req_msglen = sizeof(*desc);
	desc_cmd.req_msg = (uint8_t *)desc;
	desc_cmd.rsp_buflen = sizeof(*desc);
	desc_cmd.rsp_msgbuf = (uint8_t *)desc;

	if (buf == NULL)
		return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);

	desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);

	buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
	buff_cmd.req_msglen = buf_size;
	buff_cmd.req_msg = buf;
	buff_cmd.rsp_buflen = buf_size;
	buff_cmd.rsp_msgbuf = buf;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, &desc_cmd);
	ice_dcf_vc_cmd_set(hw, &buff_cmd);

	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
		err = -1;
		PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
		goto ret;
	}

	do {
		if (!desc_cmd.pending && !buff_cmd.pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (desc: %d / buff: %d)",
			    i, desc_cmd.v_ret, buff_cmd.v_ret);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, &desc_cmd);
	ice_dcf_aq_cmd_clear(hw, &buff_cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}
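
/* Re-read the VF resources and the VF/VSI map after the PF signals a VSI
 * change event, retrying until it succeeds or the retry budget is exhausted.
 */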
int
ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
	int i = 0;
	int err = -1;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);

	rte_intr_disable(pci_dev->intr_handle);
	ice_dcf_disable_irq0(hw);

	for (;;) {
		if (ice_dcf_get_vf_resource(hw) == 0 &&
		    ice_dcf_get_vf_vsi_map(hw) >= 0) {
			err = 0;
			break;
		}

		if (++i >= ICE_DCF_ARQ_MAX_RETRIES)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	}

	rte_intr_enable(pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	rte_spinlock_unlock(&hw->vc_cmd_send_lock);

	return err;
}

static int
ice_dcf_get_supported_rxdid(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw,
					  VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  NULL, 0);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  (uint8_t *)&hw->supported_rxdid,
					  sizeof(uint64_t), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	return 0;
}

static int
dcf_get_vlan_offload_caps_v2(struct ice_dcf_hw *hw)
{
	struct virtchnl_vlan_caps vlan_v2_caps;
	struct dcf_virtchnl_cmd args;
	int ret;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
	args.rsp_msgbuf = (uint8_t *)&vlan_v2_caps;
	args.rsp_buflen = sizeof(vlan_v2_caps);

	ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
		return ret;
	}

	rte_memcpy(&hw->vlan_v2_caps, &vlan_v2_caps, sizeof(vlan_v2_caps));

	return 0;
}
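
/* One-time DCF initialization: set up the AdminQ over the VF BAR, negotiate
 * the API version, fetch VF resources and the VSI map, allocate RSS/QoS state
 * and hook up the interrupt handler.
 */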
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret, size;

	hw->resetting = false;

	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;

	hw->avf.bus.bus_id = pci_dev->addr.bus;
	hw->avf.bus.device = pci_dev->addr.devid;
	hw->avf.bus.func = pci_dev->addr.function;

	hw->avf.device_id = pci_dev->id.device_id;
	hw->avf.vendor_id = pci_dev->id.vendor_id;
	hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
	hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;

	rte_spinlock_init(&hw->vc_cmd_send_lock);
	rte_spinlock_init(&hw->vc_cmd_queue_lock);
	TAILQ_INIT(&hw->vc_cmd_queue);

	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
	if (hw->arq_buf == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
		goto err;
	}

	ret = iavf_set_mac_type(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
		goto err;
	}

	ret = ice_dcf_check_reset_done(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	ret = iavf_init_adminq(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
		goto err;
	}

	if (ice_dcf_init_check_api_version(hw)) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
	if (hw->vf_res == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (ice_dcf_get_vf_resource(hw)) {
		PMD_INIT_LOG(ERR, "Failed to get VF resource");
		goto err_alloc;
	}

	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
		ice_dcf_mode_disable(hw);
		goto err_alloc;
	}

	/* Allocate memory for RSS info */
	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		hw->rss_key = rte_zmalloc(NULL,
					  hw->vf_res->rss_key_size, 0);
		if (!hw->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_alloc;
		}
		hw->rss_lut = rte_zmalloc("rss_lut",
					  hw->vf_res->rss_lut_size, 0);
		if (!hw->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
		if (ice_dcf_get_supported_rxdid(hw) != 0) {
			PMD_INIT_LOG(ERR, "failed to get supported rxdid");
			goto err_rss;
		}
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
		ice_dcf_tm_conf_init(eth_dev);
		size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
		hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
		if (!hw->qos_bw_cfg) {
			PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
			goto err_rss;
		}
	}

	hw->eth_dev = eth_dev;
	rte_intr_callback_register(pci_dev->intr_handle,
				   ice_dcf_dev_interrupt_handler, hw);
	rte_intr_enable(pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	if ((hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) &&
	    dcf_get_vlan_offload_caps_v2(hw))
		goto err_rss;

	return 0;

err_rss:
	rte_free(hw->rss_key);
	rte_free(hw->rss_lut);
err_alloc:
	rte_free(hw->vf_res);
err_api:
	iavf_shutdown_adminq(&hw->avf);
err:
	rte_free(hw->arq_buf);
	return -1;
}
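
/* Tear down everything set up by ice_dcf_init_hw: disable DCF mode, shut down
 * the AdminQ, unregister the interrupt handler and release all allocated
 * buffers.
 */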
void
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
		if (hw->tm_conf.committed) {
			ice_dcf_clear_bw(hw);
			ice_dcf_tm_conf_uninit(eth_dev);
		}

	ice_dcf_disable_irq0(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ice_dcf_dev_interrupt_handler, hw);

	ice_dcf_mode_disable(hw);
	iavf_shutdown_adminq(&hw->avf);

	rte_free(hw->arq_buf);
	hw->arq_buf = NULL;

	rte_free(hw->vf_vsi_map);
	hw->vf_vsi_map = NULL;

	rte_free(hw->vf_res);
	hw->vf_res = NULL;

	rte_free(hw->rss_lut);
	hw->rss_lut = NULL;

	rte_free(hw->rss_key);
	hw->rss_key = NULL;

	rte_free(hw->qos_bw_cfg);
	hw->qos_bw_cfg = NULL;

	rte_free(hw->ets_config);
	hw->ets_config = NULL;
}

int
ice_dcf_configure_rss_key(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_key *rss_key;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_key) + hw->vf_res->rss_key_size - 1;
	rss_key = rte_zmalloc("rss_key", len, 0);
	if (!rss_key)
		return -ENOMEM;

	rss_key->vsi_id = hw->vsi_res->vsi_id;
	rss_key->key_len = hw->vf_res->rss_key_size;
	rte_memcpy(rss_key->key, hw->rss_key, hw->vf_res->rss_key_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_key;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");

	rte_free(rss_key);
	return err;
}

int
ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_lut *rss_lut;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_lut) + hw->vf_res->rss_lut_size - 1;
	rss_lut = rte_zmalloc("rss_lut", len, 0);
	if (!rss_lut)
		return -ENOMEM;

	rss_lut->vsi_id = hw->vsi_res->vsi_id;
	rss_lut->lut_entries = hw->vf_res->rss_lut_size;
	rte_memcpy(rss_lut->lut, hw->rss_lut, hw->vf_res->rss_lut_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_lut;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_LUT");

	rte_free(rss_lut);
	return err;
}
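
/* Initialize the RSS key and lookup table from the port configuration and
 * push them to the PF.
 */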
int
ice_dcf_init_rss(struct ice_dcf_hw *hw)
{
	struct rte_eth_dev *dev = hw->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
		return ice_dcf_configure_rss_lut(hw);
	}

	/* In IAVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key)
		/* Calculate the default hash key */
		for (i = 0; i < hw->vf_res->rss_key_size; i++)
			hw->rss_key[i] = (uint8_t)rte_rand();
	else
		rte_memcpy(hw->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   hw->vf_res->rss_key_size));

	/* init RSS LUT table */
	for (i = 0, j = 0; i < hw->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		hw->rss_lut[i] = j;
	}

	/* send virtchnl ops to configure RSS */
	ret = ice_dcf_configure_rss_lut(hw);
	if (ret)
		return ret;
	ret = ice_dcf_configure_rss_key(hw);
	if (ret)
		return ret;

	return 0;
}

#define IAVF_RXDID_LEGACY_0	0
#define IAVF_RXDID_LEGACY_1	1
#define IAVF_RXDID_COMMS_OVS_1	22
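
/* Build a VIRTCHNL_OP_CONFIG_VSI_QUEUES message describing every Rx/Tx queue
 * (ring base, length, buffer size and requested Rx descriptor format) and
 * send it to the PF.
 */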
int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
	struct ice_tx_queue **txq =
		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
	struct virtchnl_vsi_queue_config_info *vc_config;
	struct virtchnl_queue_pair_info *vc_qp;
	struct dcf_virtchnl_cmd args;
	int i, err, size;

	size = sizeof(*vc_config) +
	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
	vc_config = rte_zmalloc("cfg_queue", size, 0);
	if (!vc_config)
		return -ENOMEM;

	vc_config->vsi_id = hw->vsi_res->vsi_id;
	vc_config->num_queue_pairs = hw->num_queue_pairs;

	for (i = 0, vc_qp = vc_config->qpair;
	     i < hw->num_queue_pairs;
	     i++, vc_qp++) {
		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->txq.queue_id = i;
		if (i < hw->eth_dev->data->nb_tx_queues) {
			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
		}

		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->rxq.queue_id = i;

		if (i >= hw->eth_dev->data->nb_rx_queues)
			continue;

		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    hw->supported_rxdid &
		    BIT(IAVF_RXDID_COMMS_OVS_1)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
				    "Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
			rte_free(vc_config);
			return -EINVAL;
		}
#else
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    hw->supported_rxdid &
		    BIT(IAVF_RXDID_LEGACY_0)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
				    "Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
			rte_free(vc_config);
			return -EINVAL;
		}
#endif
		ice_select_rxd_to_pkt_fields_handler(rxq[i], vc_qp->rxq.rxdid);
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.req_msg = (uint8_t *)vc_config;
	args.req_msglen = size;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of"
			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");

	rte_free(vc_config);
	return err;
}

int
ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_irq_map_info *map_info;
	struct virtchnl_vector_map *vecmap;
	struct dcf_virtchnl_cmd args;
	int len, i, err;

	len = sizeof(struct virtchnl_irq_map_info) +
	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;

	map_info = rte_zmalloc("map_info", len, 0);
	if (!map_info)
		return -ENOMEM;

	map_info->num_vectors = hw->nb_msix;
	for (i = 0; i < hw->nb_msix; i++) {
		vecmap = &map_info->vecmap[i];
		vecmap->vsi_id = hw->vsi_res->vsi_id;
		vecmap->rxitr_idx = 0;
		vecmap->vector_id = hw->msix_base + i;
		vecmap->txq_map = 0;
		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.req_msg = (u8 *)map_info;
	args.req_msglen = len;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

	rte_free(map_info);
	return err;
}

int
ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;
	if (rx)
		queue_select.rx_queues |= 1 << qid;
	else
		queue_select.tx_queues |= 1 << qid;

	memset(&args, 0, sizeof(args));
	if (on)
		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
	else
		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;

	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");

	return err;
}

int
ice_dcf_disable_queues(struct ice_dcf_hw *hw)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;

	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_DISABLE_QUEUES");

	return err;
}

int
ice_dcf_query_stats(struct ice_dcf_hw *hw,
		    struct virtchnl_eth_stats *pstats)
{
	struct virtchnl_queue_select q_stats;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&q_stats, 0, sizeof(q_stats));
	q_stats.vsi_id = hw->vsi_res->vsi_id;

	args.v_op = VIRTCHNL_OP_GET_STATS;
	args.req_msg = (uint8_t *)&q_stats;
	args.req_msglen = sizeof(q_stats);
	args.rsp_msglen = sizeof(*pstats);
	args.rsp_msgbuf = (uint8_t *)pstats;
	args.rsp_buflen = sizeof(*pstats);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
		return err;
	}

	return 0;
}
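
/* Add or remove a single MAC address (of the given virtchnl address type) on
 * the DCF VSI via VIRTCHNL_OP_ADD/DEL_ETH_ADDR.
 */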
int
ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw,
			     struct rte_ether_addr *addr,
			     bool add, uint8_t type)
{
	struct virtchnl_ether_addr_list *list;
	struct dcf_virtchnl_cmd args;
	int len, err = 0;

	if (hw->resetting) {
		if (!add)
			return 0;

		PMD_DRV_LOG(ERR, "fail to add all MACs for VF resetting");
		return -EIO;
	}

	len = sizeof(struct virtchnl_ether_addr_list);
	len += sizeof(struct virtchnl_ether_addr);

	list = rte_zmalloc(NULL, len, 0);
	if (!list) {
		PMD_DRV_LOG(ERR, "fail to allocate memory");
		return -ENOMEM;
	}

	rte_memcpy(list->list[0].addr, addr->addr_bytes,
		   sizeof(addr->addr_bytes));

	PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT,
		    RTE_ETHER_ADDR_BYTES(addr));
	list->list[0].type = type;
	list->vsi_id = hw->vsi_res->vsi_id;
	list->num_elements = 1;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			  VIRTCHNL_OP_DEL_ETH_ADDR;
	args.req_msg = (uint8_t *)list;
	args.req_msglen = len;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_ETHER_ADDRESS" :
			    "OP_DEL_ETHER_ADDRESS");

	rte_free(list);
	return err;
}