return 0;
}
+#define IAVF_RXDID_LEGACY_0 0
#define IAVF_RXDID_LEGACY_1 1
#define IAVF_RXDID_COMMS_GENERIC 16
vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (hw->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
hw->supported_rxdid &
PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
return -EINVAL;
}
+#else
+ if (hw->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+ hw->supported_rxdid &
+ BIT(IAVF_RXDID_LEGACY_0)) {
+ vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
+ PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+ "Queue[%d]", vc_qp->rxq.rxdid, i);
+ } else {
+ PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
+ return -EINVAL;
+ }
+#endif
}
memset(&args, 0, sizeof(args));
return 0;
}
+
+/**
+ * Add or remove the device's primary MAC address over virtchnl.
+ *
+ * Despite the "all_mac_addr" name, only the first entry of
+ * hw->eth_dev->data->mac_addrs is sent: the request list is sized for a
+ * single virtchnl_ether_addr and num_elements is fixed at 1.
+ *
+ * @param hw  DCF hardware context; supplies the VSI id and MAC table.
+ * @param add true to issue VIRTCHNL_OP_ADD_ETH_ADDR, false for
+ *            VIRTCHNL_OP_DEL_ETH_ADDR.
+ * @return 0 on success, -ENOMEM if the request buffer cannot be
+ *         allocated, otherwise the error from
+ *         ice_dcf_execute_virtchnl_cmd().
+ */
+int
+ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
+{
+	struct virtchnl_ether_addr_list *list;
+	struct rte_ether_addr *addr;
+	struct dcf_virtchnl_cmd args;
+	int len, err = 0;
+
+	/* Message size: list header plus exactly one address element. */
+	len = sizeof(struct virtchnl_ether_addr_list);
+	addr = hw->eth_dev->data->mac_addrs;
+	len += sizeof(struct virtchnl_ether_addr);
+
+	/* Zeroed allocation so any padding in the message is clean. */
+	list = rte_zmalloc(NULL, len, 0);
+	if (!list) {
+		PMD_DRV_LOG(ERR, "fail to allocate memory");
+		return -ENOMEM;
+	}
+
+	rte_memcpy(list->list[0].addr, addr->addr_bytes,
+			sizeof(addr->addr_bytes));
+	PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+			    addr->addr_bytes[0], addr->addr_bytes[1],
+			    addr->addr_bytes[2], addr->addr_bytes[3],
+			    addr->addr_bytes[4], addr->addr_bytes[5]);
+
+	list->vsi_id = hw->vsi_res->vsi_id;
+	list->num_elements = 1;
+
+	/* Build and fire the virtchnl command; op depends on 'add'. */
+	memset(&args, 0, sizeof(args));
+	args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+			VIRTCHNL_OP_DEL_ETH_ADDR;
+	args.req_msg = (uint8_t *)list;
+	args.req_msglen  = len;
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+			    add ? "OP_ADD_ETHER_ADDRESS" :
+			    "OP_DEL_ETHER_ADDRESS");
+	/* Request buffer is owned here; free it on every path. */
+	rte_free(list);
+	return err;
+}