#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_dev.h>
#include "iavf.h"
case VIRTCHNL_OP_VERSION:
case VIRTCHNL_OP_GET_VF_RESOURCES:
case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
/* for init virtchnl ops, need to poll the response */
do {
result = iavf_read_msg_from_pf(adapter, args->out_size,
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_OFFLOAD_CRC |
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_LARGE_NUM_QPAIRS;
args.in_args = (uint8_t *)∩︀
args.in_args_size = sizeof(caps);
return 0;
}
+/* Enable or disable VLAN stripping through the VIRTCHNL VLAN V2 interface.
+ *
+ * @adapter: iavf adapter; its VF private data carries the VLAN V2 caps
+ *           previously negotiated with the PF.
+ * @enable:  true to enable stripping, false to disable it.
+ *
+ * Return: 0 on success, -ENOTSUP when neither the outer nor the inner tag
+ * reports toggleable 0x8100 stripping, otherwise the iavf_execute_vf_cmd()
+ * error code.
+ */
+int
+iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_vlan_supported_caps *stripping_caps;
+	struct virtchnl_vlan_setting vlan_strip;
+	struct iavf_cmd_info args;
+	uint32_t *ethertype;
+	int ret;
+
+	stripping_caps = &vf->vlan_v2_caps.offloads.stripping_support;
+
+	/* Prefer the outer tag when it supports toggling 0x8100 stripping,
+	 * else fall back to the inner tag. Only the slot's address is taken
+	 * here; the pointer stays valid across the memset() below.
+	 */
+	if ((stripping_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
+	    (stripping_caps->outer & VIRTCHNL_VLAN_TOGGLE))
+		ethertype = &vlan_strip.outer_ethertype_setting;
+	else if ((stripping_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
+		 (stripping_caps->inner & VIRTCHNL_VLAN_TOGGLE))
+		ethertype = &vlan_strip.inner_ethertype_setting;
+	else
+		return -ENOTSUP;
+
+	/* Zero the whole setting first, then fill only the chosen slot. */
+	memset(&vlan_strip, 0, sizeof(vlan_strip));
+	vlan_strip.vport_id = vf->vsi_res->vsi_id;
+	*ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+	args.ops = enable ? VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 :
+			    VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2;
+	args.in_args = (uint8_t *)&vlan_strip;
+	args.in_args_size = sizeof(vlan_strip);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	ret = iavf_execute_vf_cmd(adapter, &args);
+	if (ret)
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
+				     "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2");
+
+	return ret;
+}
+
+/* Enable or disable VLAN insertion through the VIRTCHNL VLAN V2 interface.
+ *
+ * @adapter: iavf adapter; its VF private data carries the VLAN V2 caps
+ *           previously negotiated with the PF.
+ * @enable:  true to enable insertion, false to disable it.
+ *
+ * Return: 0 on success, -ENOTSUP when neither the outer nor the inner tag
+ * reports toggleable 0x8100 insertion, otherwise the iavf_execute_vf_cmd()
+ * error code.
+ */
+int
+iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_vlan_supported_caps *insertion_caps;
+	struct virtchnl_vlan_setting vlan_insert;
+	struct iavf_cmd_info args;
+	uint32_t *ethertype;
+	int ret;
+
+	insertion_caps = &vf->vlan_v2_caps.offloads.insertion_support;
+
+	/* Prefer the outer tag when it supports toggling 0x8100 insertion,
+	 * else fall back to the inner tag. Only the slot's address is taken
+	 * here; the pointer stays valid across the memset() below.
+	 */
+	if ((insertion_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
+	    (insertion_caps->outer & VIRTCHNL_VLAN_TOGGLE))
+		ethertype = &vlan_insert.outer_ethertype_setting;
+	else if ((insertion_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) &&
+		 (insertion_caps->inner & VIRTCHNL_VLAN_TOGGLE))
+		ethertype = &vlan_insert.inner_ethertype_setting;
+	else
+		return -ENOTSUP;
+
+	/* Zero the whole setting first, then fill only the chosen slot. */
+	memset(&vlan_insert, 0, sizeof(vlan_insert));
+	vlan_insert.vport_id = vf->vsi_res->vsi_id;
+	*ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+	args.ops = enable ? VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 :
+			    VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2;
+	args.in_args = (uint8_t *)&vlan_insert;
+	args.in_args_size = sizeof(vlan_insert);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	ret = iavf_execute_vf_cmd(adapter, &args);
+	if (ret)
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
+				     "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2");
+
+	return ret;
+}
+
+/* Add or delete a single VLAN filter using the VIRTCHNL VLAN V2 interface.
+ *
+ * @adapter: iavf adapter; its VF private data carries the negotiated
+ *           VLAN V2 filtering caps.
+ * @vlanid:  VLAN id to place in the filter's TCI field.
+ * @add:     true to add the filter, false to delete it.
+ *
+ * The outer-tag filtering caps are used when the PF reports any; otherwise
+ * the inner-tag caps are used. 0x8100 (802.1Q) ethertype filtering support
+ * is required.
+ *
+ * Return: 0 on success, -ENOTSUP when 0x8100 filtering is unsupported,
+ * otherwise the iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_vlan_supported_caps *supported_caps;
+	struct virtchnl_vlan_filter_list_v2 vlan_filter;
+	struct virtchnl_vlan *vlan_setting;
+	struct iavf_cmd_info args;
+	uint32_t filtering_caps;
+	int err;
+
+	/* Pick outer-tag filtering when available, else inner-tag. The
+	 * pointer taken here stays valid across the memset() below.
+	 */
+	supported_caps = &vf->vlan_v2_caps.filtering.filtering_support;
+	if (supported_caps->outer) {
+		filtering_caps = supported_caps->outer;
+		vlan_setting = &vlan_filter.filters[0].outer;
+	} else {
+		filtering_caps = supported_caps->inner;
+		vlan_setting = &vlan_filter.filters[0].inner;
+	}
+
+	if (!(filtering_caps & VIRTCHNL_VLAN_ETHERTYPE_8100))
+		return -ENOTSUP;
+
+	/* Build a one-element filter list for the VF's VSI. */
+	memset(&vlan_filter, 0, sizeof(vlan_filter));
+	vlan_filter.vport_id = vf->vsi_res->vsi_id;
+	vlan_filter.num_elements = 1;
+	vlan_setting->tpid = RTE_ETHER_TYPE_VLAN;
+	vlan_setting->tci = vlanid;
+
+	args.ops = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2;
+	args.in_args = (uint8_t *)&vlan_filter;
+	args.in_args_size = sizeof(vlan_filter);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+			    add ? "OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2");
+
+	return err;
+}
+
+/* Query the PF for the VF's VLAN V2 offload capabilities.
+ *
+ * @adapter: iavf adapter whose VF private data receives the caps.
+ *
+ * Sends VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS with no payload and, on
+ * success, copies the response from the admin-queue buffer into
+ * vf->vlan_v2_caps for later use by the VLAN V2 helpers.
+ *
+ * Return: 0 on success, otherwise the iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int ret;
+
+	args.ops = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	ret = iavf_execute_vf_cmd(adapter, &args);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
+		return ret;
+	}
+
+	/* Cache the PF's response for the strip/insert/filter helpers. */
+	rte_memcpy(&vf->vlan_v2_caps, vf->aq_resp, sizeof(vf->vlan_v2_caps));
+
+	return 0;
+}
+
int
iavf_enable_queues(struct iavf_adapter *adapter)
{
return err;
}
+/* Enable all Rx/Tx queues with VIRTCHNL_OP_ENABLE_QUEUES_V2 (large VF path).
+ *
+ * @adapter: iavf adapter; queue counts are read from the ethdev data.
+ *
+ * Builds a two-chunk request (one TX chunk, one RX chunk — the chunk array
+ * is indexed by the VIRTCHNL_QUEUE_TYPE_* enum values) covering queues
+ * [0, nb_tx_queues) and [0, nb_rx_queues).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, otherwise the
+ * iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_enable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	/* The struct embeds one chunk; add space for the remaining ones. */
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+	      sizeof(struct virtchnl_queue_chunk) *
+	      (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
+
+	rte_free(queue_select);
+	return err;
+}
+
+/* Disable all Rx/Tx queues with VIRTCHNL_OP_DISABLE_QUEUES_V2 (large VF path).
+ *
+ * @adapter: iavf adapter; queue counts are read from the ethdev data.
+ *
+ * Mirror of iavf_enable_queues_lv(): builds a two-chunk request (one TX,
+ * one RX — the chunk array is indexed by the VIRTCHNL_QUEUE_TYPE_* enum
+ * values) covering queues [0, nb_tx_queues) and [0, nb_rx_queues).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, otherwise the
+ * iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_disable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	/* The struct embeds one chunk; add space for the remaining ones. */
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+	      sizeof(struct virtchnl_queue_chunk) *
+	      (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
+
+	rte_free(queue_select);
+	return err;
+}
+
+/* Enable or disable a single queue via the QUEUES_V2 ops (large VF path).
+ *
+ * @adapter: iavf adapter.
+ * @qid:     queue id to toggle.
+ * @rx:      true for an Rx queue, false for a Tx queue.
+ * @on:      true to enable the queue, false to disable it.
+ *
+ * Builds a one-chunk request describing exactly one queue of the requested
+ * type and sends VIRTCHNL_OP_ENABLE_QUEUES_V2 or
+ * VIRTCHNL_OP_DISABLE_QUEUES_V2 accordingly.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, otherwise the
+ * iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+		     bool rx, bool on)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	/* A single embedded chunk suffices for one queue. */
+	len = sizeof(struct virtchnl_del_ena_dis_queues);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = 1;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	if (rx) {
+		queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX;
+		queue_chunk->start_queue_id = qid;
+		queue_chunk->num_queues = 1;
+	} else {
+		queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX;
+		queue_chunk->start_queue_id = qid;
+		queue_chunk->num_queues = 1;
+	}
+
+	if (on)
+		args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	else
+		args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+
+	rte_free(queue_select);
+	return err;
+}
+
int
iavf_configure_rss_lut(struct iavf_adapter *adapter)
{
}
int
-iavf_configure_queues(struct iavf_adapter *adapter)
+iavf_configure_queues(struct iavf_adapter *adapter,
+ uint16_t num_queue_pairs, uint16_t index)
{
struct iavf_rx_queue **rxq =
(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
int err;
size = sizeof(*vc_config) +
- sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+ sizeof(vc_config->qpair[0]) * num_queue_pairs;
vc_config = rte_zmalloc("cfg_queue", size, 0);
if (!vc_config)
return -ENOMEM;
vc_config->vsi_id = vf->vsi_res->vsi_id;
- vc_config->num_queue_pairs = vf->num_queue_pairs;
+ vc_config->num_queue_pairs = num_queue_pairs;
- for (i = 0, vc_qp = vc_config->qpair;
- i < vf->num_queue_pairs;
+ for (i = index, vc_qp = vc_config->qpair;
+ i < index + num_queue_pairs;
i++, vc_qp++) {
vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
vc_qp->txq.queue_id = i;
- /* Virtchnnl configure queues by pairs */
+
+ /* Virtchnnl configure tx queues by pairs */
if (i < adapter->eth_dev->data->nb_tx_queues) {
vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
}
+
vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
vc_qp->rxq.queue_id = i;
vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
- /* Virtchnnl configure queues by pairs */
- if (i < adapter->eth_dev->data->nb_rx_queues) {
- vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
- vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
- vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
- }
+ if (i >= adapter->eth_dev->data->nb_rx_queues)
+ continue;
+
+ /* Virtchnnl configure rx queues by pairs */
+ vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+ vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+ vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+ vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
- vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
- vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
- PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
- "Queue[%d]", vc_qp->rxq.rxdid, i);
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+ vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+ vc_qp->rxq.rxdid = rxq[i]->rxdid;
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+ vc_qp->rxq.rxdid, i);
} else {
+ PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+ "request default RXDID[%d] in Queue[%d]",
+ rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
- PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
- "Queue[%d]", vc_qp->rxq.rxdid, i);
}
#else
if (vf->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
vf->supported_rxdid & BIT(IAVF_RXDID_LEGACY_0)) {
vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
- PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
- "Queue[%d]", vc_qp->rxq.rxdid, i);
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+ vc_qp->rxq.rxdid, i);
} else {
- PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
+ PMD_DRV_LOG(ERR, "RXDID[%d] is not supported",
+ IAVF_RXDID_LEGACY_0);
return -1;
}
#endif
return -ENOMEM;
map_info->num_vectors = vf->nb_msix;
- for (i = 0; i < vf->nb_msix; i++) {
- vecmap = &map_info->vecmap[i];
+ for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+ vecmap =
+ &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
vecmap->vsi_id = vf->vsi_res->vsi_id;
vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
- vecmap->vector_id = vf->msix_base + i;
+ vecmap->vector_id = vf->qv_map[i].vector_id;
vecmap->txq_map = 0;
- vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+ vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
}
args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
return err;
}
+/* Map a range of Rx queues to interrupt vectors (large VF path).
+ *
+ * @adapter: iavf adapter; vf->qv_map holds the queue/vector pairs.
+ * @num:     number of queue-vector maps to send in this request.
+ * @index:   first entry of vf->qv_map to take the pairs from.
+ *
+ * Sends VIRTCHNL_OP_MAP_QUEUE_VECTOR with @num entries copied from
+ * vf->qv_map[@index .. @index + @num). All entries use ITR index 0 and
+ * queue type RX.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, otherwise the
+ * iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+		       uint16_t index)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_vector_maps *map_info;
+	struct virtchnl_queue_vector *qv_maps;
+	struct iavf_cmd_info args;
+	int len, i, err;
+	int count = 0;
+
+	/* The struct embeds one map; add space for the remaining ones. */
+	len = sizeof(struct virtchnl_queue_vector_maps) +
+	      sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = vf->vsi_res->vsi_id;
+	map_info->num_qv_maps = num;
+	/* i walks the vf->qv_map window; count walks the output array. */
+	for (i = index; i < index + map_info->num_qv_maps; i++) {
+		qv_maps = &map_info->qv_maps[count++];
+		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+		qv_maps->queue_id = vf->qv_map[i].queue_id;
+		qv_maps->vector_id = vf->qv_map[i].vector_id;
+	}
+
+	args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+	rte_free(map_info);
+	return err;
+}
+
void
iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
{
return err;
}
+/* Program the VF's RSS hash-enable (hena) bit field on the PF.
+ *
+ * @adapter: iavf adapter.
+ * @hena:    bitmask of hash-enable flags to set via
+ *           VIRTCHNL_OP_SET_RSS_HENA.
+ *
+ * Return: 0 on success, otherwise the iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_rss_hena vrh;
+	struct iavf_cmd_info args;
+	int err;
+
+	vrh.hena = hena;
+	args.ops = VIRTCHNL_OP_SET_RSS_HENA;
+	args.in_args = (u8 *)&vrh;
+	args.in_args_size = sizeof(vrh);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_SET_RSS_HENA");
+
+	return err;
+}
+
int
iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
struct rte_ether_addr *mc_addrs,
return -1;
}
+
+/* Query the maximum RSS queue region size supported for this VF.
+ *
+ * @adapter: iavf adapter.
+ *
+ * Sends VIRTCHNL_OP_GET_MAX_RSS_QREGION (no payload), reads the
+ * qregion_width field from the response and stores the resulting region
+ * size (1 << width) in vf->max_rss_qregion.
+ *
+ * Return: 0 on success, otherwise the iavf_execute_vf_cmd() error code.
+ */
+int
+iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	uint16_t qregion_width;
+	int err;
+
+	args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
+		return err;
+	}
+
+	/* Response carries a width; the region size is 2^width queues. */
+	qregion_width =
+	((struct virtchnl_max_rss_qregion *)args.out_buffer)->qregion_width;
+
+	vf->max_rss_qregion = (uint16_t)(1 << qregion_width);
+
+	return 0;
+}