/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "ice_dcf.h"
#include "ice_rxtx.h"

#define ICE_DCF_AQ_LEN     32
#define ICE_DCF_AQ_BUF_SZ  4096

#define ICE_DCF_ARQ_MAX_RETRIES 200
#define ICE_DCF_ARQ_CHECK_TIME  2	/* msecs */

#define ICE_DCF_VF_RES_BUF_SZ	\
	(sizeof(struct virtchnl_vf_resource) +	\
	 IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))

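/*
 * During early init the AdminQ interrupt is not yet enabled, so the two
 * helpers below send a virtchnl request and then poll the ARQ for the
 * matching response instead of waiting for the interrupt handler.
 */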
static __rte_always_inline int
ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *req_msg, uint16_t req_msglen)
{
	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
				      req_msg, req_msglen, NULL);
}

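/*
 * Poll the ARQ for the response to @op, up to ICE_DCF_ARQ_MAX_RETRIES
 * polls spaced ICE_DCF_ARQ_CHECK_TIME msecs apart. The virtchnl opcode
 * is carried in cookie_high and the virtchnl status in cookie_low of
 * the descriptor; events for other opcodes trigger another poll.
 */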
static int
ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
			    uint16_t *rsp_msglen)
{
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	int i = 0;
	int err;

	event.buf_len = rsp_buflen;
	event.msg_buf = rsp_msgbuf;

	do {
		err = iavf_clean_arq_element(&hw->avf, &event, NULL);
		if (err != IAVF_SUCCESS)
			goto again;

		v_op = rte_le_to_cpu_32(event.desc.cookie_high);
		if (v_op != op)
			goto again;

		if (rsp_msglen != NULL)
			*rsp_msglen = event.msg_len;
		return rte_le_to_cpu_32(event.desc.cookie_low);

again:
		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	return -EIO;
}

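/*
 * Commands sent through the interrupt path are tracked on
 * hw->vc_cmd_queue under vc_cmd_queue_lock; the IRQ handler matches
 * each response against this list and clears the pending flag.
 */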
static __rte_always_inline void
ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

static __rte_always_inline void
ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	cmd->v_ret = IAVF_ERR_NOT_READY;
	cmd->rsp_msglen = 0;
	cmd->pending = 1;

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

static __rte_always_inline int
ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	return iavf_aq_send_msg_to_pf(&hw->avf,
				      cmd->v_op, IAVF_SUCCESS,
				      cmd->req_msg, cmd->req_msglen, NULL);
}

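/*
 * Demultiplex one ARQ event from the interrupt handler: unsolicited
 * VIRTCHNL_OP_EVENT messages go to the registered event callback, while
 * command responses complete the matching pending entry on
 * hw->vc_cmd_queue. The compiler barrier makes sure the response buffer
 * is fully written before the poller can observe pending == 0.
 */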
static __rte_always_inline void
ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
{
	struct dcf_virtchnl_cmd *cmd;
	enum virtchnl_ops v_op;
	enum iavf_status v_ret;
	uint16_t aq_op;

	aq_op = rte_le_to_cpu_16(info->desc.opcode);
	if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
		PMD_DRV_LOG(ERR,
			    "Request %u is not supported yet", aq_op);
		return;
	}

	v_op = rte_le_to_cpu_32(info->desc.cookie_high);
	if (v_op == VIRTCHNL_OP_EVENT) {
		if (hw->vc_event_msg_cb != NULL)
			hw->vc_event_msg_cb(hw,
					    info->msg_buf,
					    info->msg_len);
		return;
	}

	v_ret = rte_le_to_cpu_32(info->desc.cookie_low);

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
		if (cmd->v_op == v_op && cmd->pending) {
			cmd->v_ret = v_ret;
			cmd->rsp_msglen = RTE_MIN(info->msg_len,
						  cmd->rsp_buflen);
			if (likely(cmd->rsp_msglen != 0))
				rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
					   cmd->rsp_msglen);

			/* prevent compiler reordering */
			rte_compiler_barrier();
			cmd->pending = 0;
			break;
		}
	}

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

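/* Drain all pending ARQ events; called with IRQ0 masked. */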
static void
ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
{
	struct iavf_arq_event_info info;
	uint16_t pending = 1;
	int ret;

	info.buf_len = ICE_DCF_AQ_BUF_SZ;
	info.msg_buf = hw->arq_buf;

	while (pending) {
		ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
		if (ret != IAVF_SUCCESS)
			break;

		ice_dcf_aq_cmd_handle(hw, &info);
	}
}

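/*
 * Negotiate the virtchnl API version with the PF. The peer must be at
 * least 1.1 and no newer than the version this driver was built with.
 */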
static int
ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
{
#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START	1
#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START	1
	struct virtchnl_version_info version, *pver;
	int err;

	version.major = VIRTCHNL_VERSION_MAJOR;
	version.minor = VIRTCHNL_VERSION_MINOR;
	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)&version, sizeof(version));
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
		return err;
	}

	pver = &hw->virtchnl_version;
	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)pver, sizeof(*pver), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
		return -1;
	}

	PMD_INIT_LOG(DEBUG,
		     "Peer PF API version: %u.%u", pver->major, pver->minor);

	if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
	    (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
	     pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
		PMD_INIT_LOG(ERR,
			     "VIRTCHNL API version should not be lower than (%u.%u)",
			     ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
			     ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
		return -1;
	} else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
		   (pver->major == VIRTCHNL_VERSION_MAJOR &&
		    pver->minor > VIRTCHNL_VERSION_MINOR)) {
		PMD_INIT_LOG(ERR,
			     "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
			     pver->major, pver->minor,
			     VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
		return -1;
	}

	PMD_INIT_LOG(DEBUG, "Peer is a supported PF host");

	return 0;
}

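/*
 * Request the VF resources, asking for the DCF capability plus the
 * offloads this PMD supports, then locate the SR-IOV LAN VSI in the
 * returned VSI list.
 */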
static int
ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
{
	uint32_t caps;
	int err, i;

	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)&caps, sizeof(caps));
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)hw->vf_res,
					  ICE_DCF_VF_RES_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
		return -1;
	}

	iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);

	hw->vsi_res = NULL;
	for (i = 0; i < hw->vf_res->num_vsis; i++) {
		if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			hw->vsi_res = &hw->vf_res->vsi_res[i];
	}

	if (!hw->vsi_res) {
		PMD_DRV_LOG(ERR, "no LAN VSI found");
		return -1;
	}

	hw->vsi_id = hw->vsi_res->vsi_id;
	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);

	return 0;
}

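/*
 * Query the VF ID to VSI ID map from the PF. Returns 0 when the cached
 * map was updated, 1 when the map is unchanged, and a negative errno on
 * failure; callers that only care about errors test for < 0.
 */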
static int
ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_dcf_vsi_map *vsi_map;
	uint32_t valid_msg_len;
	uint16_t len;
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
					  &len);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
		return err;
	}

	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
	valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
			sizeof(*vsi_map);
	if (len != valid_msg_len) {
		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
			    len);
		return -EINVAL;
	}

	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
		PMD_DRV_LOG(ERR, "The number of VSI map entries (%u) doesn't match the number of VFs (%u)",
			    vsi_map->num_vfs, hw->num_vfs);
		return -EINVAL;
	}

	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
	if (!hw->vf_vsi_map) {
		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
		if (!hw->vf_vsi_map) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
			return -ENOMEM;
		}

		hw->num_vfs = vsi_map->num_vfs;
	}

	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
		return 1;
	}

	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);

	return 0;
}

static int
ice_dcf_mode_disable(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR,
			    "Failed to get response of OP_DCF_DISABLE %d",
			    err);
		return -1;
	}

	return 0;
}

static int
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
{
#define ICE_DCF_RESET_WAIT_CNT       50
	struct iavf_hw *avf = &hw->avf;
	int i, reset;

	for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
		reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;

		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;

		rte_delay_ms(20);
	}

	if (i >= ICE_DCF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

static inline void
ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

static inline void
ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Disable all interrupt types */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

static void
ice_dcf_dev_interrupt_handler(void *param)
{
	struct ice_dcf_hw *hw = param;

	ice_dcf_disable_irq0(hw);

	ice_dcf_handle_virtchnl_msg(hw);

	ice_dcf_enable_irq0(hw);
}

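/*
 * Send one virtchnl command and busy-wait for its completion. Commands
 * are serialized by vc_cmd_send_lock; the pending flag is cleared by
 * the interrupt handler once the response has been copied back.
 */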
int
ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
			     struct dcf_virtchnl_cmd *cmd)
{
	int i = 0;
	int err;

	if ((cmd->req_msg && !cmd->req_msglen) ||
	    (!cmd->req_msg && cmd->req_msglen) ||
	    (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
	    (!cmd->rsp_msgbuf && cmd->rsp_buflen))
		return -EINVAL;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, cmd);

	err = ice_dcf_vc_cmd_send(hw, cmd);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
		goto ret;
	}

	do {
		if (!cmd->pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (cmd->v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (%d) for cmd %d",
			    i, cmd->v_ret, cmd->v_op);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}

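/*
 * Tunnel an ice AdminQ command through the PF: the descriptor goes in
 * an OP_DCF_CMD_DESC message and an optional data buffer in a paired
 * OP_DCF_CMD_BUFF message; both must complete before the results are
 * read back into the caller's desc and buf.
 */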
int
ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
		    void *buf, uint16_t buf_size)
{
	struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
	struct ice_dcf_hw *hw = dcf_hw;
	int err = 0;
	int i = 0;

	if ((buf && !buf_size) || (!buf && buf_size) ||
	    buf_size > ICE_DCF_AQ_BUF_SZ)
		return -EINVAL;

	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
	desc_cmd.req_msglen = sizeof(*desc);
	desc_cmd.req_msg = (uint8_t *)desc;
	desc_cmd.rsp_buflen = sizeof(*desc);
	desc_cmd.rsp_msgbuf = (uint8_t *)desc;

	if (buf == NULL)
		return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);

	desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);

	buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
	buff_cmd.req_msglen = buf_size;
	buff_cmd.req_msg = buf;
	buff_cmd.rsp_buflen = buf_size;
	buff_cmd.rsp_msgbuf = buf;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, &desc_cmd);
	ice_dcf_vc_cmd_set(hw, &buff_cmd);

	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
		err = -1;
		PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
		goto ret;
	}

	do {
		if ((!desc_cmd.pending && !buff_cmd.pending) ||
		    (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
		    (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (desc: %d / buff: %d)",
			    i, desc_cmd.v_ret, buff_cmd.v_ret);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, &desc_cmd);
	ice_dcf_aq_cmd_clear(hw, &buff_cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}

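/*
 * Re-read the VF resources and the VF/VSI map after a PF-initiated VSI
 * change. Runs with IRQ0 masked so the no-IRQ (polling) helpers can
 * consume the ARQ directly.
 */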
int
ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
	int err = 0;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);

	rte_intr_disable(&pci_dev->intr_handle);
	ice_dcf_disable_irq0(hw);

	if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw) < 0)
		err = -1;

	rte_intr_enable(&pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	rte_spinlock_unlock(&hw->vc_cmd_send_lock);

	return err;
}

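/*
 * Query the bitmap of Rx descriptor formats (RXDIDs) the PF supports;
 * the result lands in hw->supported_rxdid and is checked later by
 * ice_dcf_configure_queues().
 */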
static int
ice_dcf_get_supported_rxdid(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw,
					  VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  NULL, 0);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
					  (uint8_t *)&hw->supported_rxdid,
					  sizeof(uint64_t), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_GET_SUPPORTED_RXDIDS");
		return -1;
	}

	return 0;
}

int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
	hw->avf.back = hw;

	hw->avf.bus.bus_id = pci_dev->addr.bus;
	hw->avf.bus.device = pci_dev->addr.devid;
	hw->avf.bus.func = pci_dev->addr.function;

	hw->avf.device_id = pci_dev->id.device_id;
	hw->avf.vendor_id = pci_dev->id.vendor_id;
	hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
	hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;

	rte_spinlock_init(&hw->vc_cmd_send_lock);
	rte_spinlock_init(&hw->vc_cmd_queue_lock);
	TAILQ_INIT(&hw->vc_cmd_queue);

	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
	if (hw->arq_buf == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
		goto err;
	}

	ret = iavf_set_mac_type(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
		goto err;
	}

	ret = ice_dcf_check_reset_done(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	ret = iavf_init_adminq(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
		goto err;
	}

	if (ice_dcf_init_check_api_version(hw)) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
	if (hw->vf_res == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (ice_dcf_get_vf_resource(hw)) {
		PMD_INIT_LOG(ERR, "Failed to get VF resource");
		goto err_alloc;
	}

	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
		ice_dcf_mode_disable(hw);
		goto err_alloc;
	}

	/* Allocate memory for RSS info */
	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		hw->rss_key = rte_zmalloc(NULL,
					  hw->vf_res->rss_key_size, 0);
		if (!hw->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_alloc;
		}
		hw->rss_lut = rte_zmalloc("rss_lut",
					  hw->vf_res->rss_lut_size, 0);
		if (!hw->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
		if (ice_dcf_get_supported_rxdid(hw) != 0) {
			PMD_INIT_LOG(ERR, "failed to get supported rxdid");
			goto err_rss;
		}
	}

	hw->eth_dev = eth_dev;
	rte_intr_callback_register(&pci_dev->intr_handle,
				   ice_dcf_dev_interrupt_handler, hw);
	rte_intr_enable(&pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	return 0;

err_rss:
	rte_free(hw->rss_key);
	rte_free(hw->rss_lut);
err_alloc:
	rte_free(hw->vf_res);
err_api:
	iavf_shutdown_adminq(&hw->avf);
err:
	rte_free(hw->arq_buf);

	return -1;
}

void
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dcf_disable_irq0(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ice_dcf_dev_interrupt_handler, hw);

	ice_dcf_mode_disable(hw);
	iavf_shutdown_adminq(&hw->avf);

	rte_free(hw->arq_buf);
	rte_free(hw->vf_vsi_map);
	rte_free(hw->vf_res);
	rte_free(hw->rss_lut);
	rte_free(hw->rss_key);
}

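/*
 * virtchnl_rss_key ends in a flexible one-byte array, so the message
 * length is sizeof(*rss_key) plus the key size minus that one byte.
 */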
int
ice_dcf_configure_rss_key(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_key *rss_key;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_key) + hw->vf_res->rss_key_size - 1;
	rss_key = rte_zmalloc("rss_key", len, 0);
	if (!rss_key)
		return -ENOMEM;

	rss_key->vsi_id = hw->vsi_res->vsi_id;
	rss_key->key_len = hw->vf_res->rss_key_size;
	rte_memcpy(rss_key->key, hw->rss_key, hw->vf_res->rss_key_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_key;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");

	rte_free(rss_key);
	return err;
}

int
ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw)
{
	struct virtchnl_rss_lut *rss_lut;
	struct dcf_virtchnl_cmd args;
	int len, err;

	len = sizeof(*rss_lut) + hw->vf_res->rss_lut_size - 1;
	rss_lut = rte_zmalloc("rss_lut", len, 0);
	if (!rss_lut)
		return -ENOMEM;

	rss_lut->vsi_id = hw->vsi_res->vsi_id;
	rss_lut->lut_entries = hw->vf_res->rss_lut_size;
	rte_memcpy(rss_lut->lut, hw->rss_lut, hw->vf_res->rss_lut_size);

	args.v_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	args.req_msglen = len;
	args.req_msg = (uint8_t *)rss_lut;
	args.rsp_msglen = 0;
	args.rsp_buflen = 0;
	args.rsp_msgbuf = NULL;
	args.pending = 0;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_LUT");

	rte_free(rss_lut);
	return err;
}

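/*
 * Seed the RSS key (random unless the application supplied one) and
 * fill the lookup table round-robin across the configured Rx queues,
 * then push both to the PF.
 */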
int
ice_dcf_init_rss(struct ice_dcf_hw *hw)
{
	struct rte_eth_dev *dev = hw->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;

	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
		return ice_dcf_configure_rss_lut(hw);
	}

	/* In IAVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key)
		/* Calculate the default hash key */
		for (i = 0; i < hw->vf_res->rss_key_size; i++)
			hw->rss_key[i] = (uint8_t)rte_rand();
	else
		rte_memcpy(hw->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   hw->vf_res->rss_key_size));

	/* init RSS LUT table */
	for (i = 0, j = 0; i < hw->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		hw->rss_lut[i] = j;
	}

	/* send virtchnl ops to configure RSS */
	ret = ice_dcf_configure_rss_lut(hw);
	if (ret)
		return ret;
	ret = ice_dcf_configure_rss_key(hw);
	if (ret)
		return ret;

	return 0;
}

#define IAVF_RXDID_LEGACY_1 1
#define IAVF_RXDID_COMMS_GENERIC 16

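/*
 * Configure every queue pair on the VSI. Rx queues request the flexible
 * generic descriptor (RXDID 16) when both the VF capability and the
 * PF-reported RXDID bitmap allow it; otherwise queue setup fails.
 */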
int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
	struct ice_tx_queue **txq =
		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
	struct virtchnl_vsi_queue_config_info *vc_config;
	struct virtchnl_queue_pair_info *vc_qp;
	struct dcf_virtchnl_cmd args;
	uint16_t i, size;
	int err;

	size = sizeof(*vc_config) +
	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
	vc_config = rte_zmalloc("cfg_queue", size, 0);
	if (!vc_config)
		return -ENOMEM;

	vc_config->vsi_id = hw->vsi_res->vsi_id;
	vc_config->num_queue_pairs = hw->num_queue_pairs;

	for (i = 0, vc_qp = vc_config->qpair;
	     i < hw->num_queue_pairs;
	     i++, vc_qp++) {
		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->txq.queue_id = i;
		if (i < hw->eth_dev->data->nb_tx_queues) {
			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
		}
		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->rxq.queue_id = i;

		if (i >= hw->eth_dev->data->nb_rx_queues)
			continue;

		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;

		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    hw->supported_rxdid &
		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
				    "Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
			rte_free(vc_config);
			return -EINVAL;
		}
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.req_msg = (uint8_t *)vc_config;
	args.req_msglen = size;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of"
			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");

	rte_free(vc_config);
	return err;
}

int
ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_irq_map_info *map_info;
	struct virtchnl_vector_map *vecmap;
	struct dcf_virtchnl_cmd args;
	int len, i, err;

	len = sizeof(struct virtchnl_irq_map_info) +
	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;

	map_info = rte_zmalloc("map_info", len, 0);
	if (!map_info)
		return -ENOMEM;

	map_info->num_vectors = hw->nb_msix;
	for (i = 0; i < hw->nb_msix; i++) {
		vecmap = &map_info->vecmap[i];
		vecmap->vsi_id = hw->vsi_res->vsi_id;
		vecmap->rxitr_idx = 0;
		vecmap->vector_id = hw->msix_base + i;
		vecmap->txq_map = 0;
		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.req_msg = (u8 *)map_info;
	args.req_msglen = len;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

	rte_free(map_info);
	return err;
}

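/* Enable or disable a single Rx or Tx queue on the VSI. */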
int
ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;
	if (rx)
		queue_select.rx_queues |= 1 << qid;
	else
		queue_select.tx_queues |= 1 << qid;

	memset(&args, 0, sizeof(args));
	if (on)
		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
	else
		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;

	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");

	return err;
}

int
ice_dcf_disable_queues(struct ice_dcf_hw *hw)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;

	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_DISABLE_QUEUES");

	return err;
}

int
ice_dcf_query_stats(struct ice_dcf_hw *hw,
		    struct virtchnl_eth_stats *pstats)
{
	struct virtchnl_queue_select q_stats;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&q_stats, 0, sizeof(q_stats));
	q_stats.vsi_id = hw->vsi_res->vsi_id;

	args.v_op = VIRTCHNL_OP_GET_STATS;
	args.req_msg = (uint8_t *)&q_stats;
	args.req_msglen = sizeof(q_stats);
	args.rsp_msglen = sizeof(*pstats);
	args.rsp_msgbuf = (uint8_t *)pstats;
	args.rsp_buflen = sizeof(*pstats);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
		return err;
	}

	return 0;
}

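/*
 * Sync the port's primary MAC address to the PF; only a single address
 * is programmed per call (num_elements is 1).
 */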
int
ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct rte_ether_addr *addr;
	struct dcf_virtchnl_cmd args;
	int len, err = 0;

	len = sizeof(struct virtchnl_ether_addr_list);
	addr = hw->eth_dev->data->mac_addrs;
	len += sizeof(struct virtchnl_ether_addr);

	list = rte_zmalloc(NULL, len, 0);
	if (!list) {
		PMD_DRV_LOG(ERR, "fail to allocate memory");
		return -ENOMEM;
	}

	rte_memcpy(list->list[0].addr, addr->addr_bytes,
		   sizeof(addr->addr_bytes));
	PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
		    addr->addr_bytes[0], addr->addr_bytes[1],
		    addr->addr_bytes[2], addr->addr_bytes[3],
		    addr->addr_bytes[4], addr->addr_bytes[5]);

	list->vsi_id = hw->vsi_res->vsi_id;
	list->num_elements = 1;

	memset(&args, 0, sizeof(args));
	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
			  VIRTCHNL_OP_DEL_ETH_ADDR;
	args.req_msg = (uint8_t *)list;
	args.req_msglen = len;
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_ETHER_ADDRESS" :
			    "OP_DEL_ETHER_ADDRESS");
	rte_free(list);
	return err;
}