}
static int
-sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
int rc;
sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
- rx_queue_id, nb_rx_desc, socket_id);
+ ethdev_qid, nb_rx_desc, socket_id);
sfc_adapter_lock(sa);
- rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
rx_conf, mb_pool);
if (rc != 0)
goto fail_rx_qinit;
- dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ dev->data->rx_queues[ethdev_qid] = rxq_info->dp;
sfc_adapter_unlock(sa);
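The translation helpers used above are defined outside this hunk. A minimal sketch of what the patch assumes follows; the typedef widths and the identity mapping are assumptions, although the identity is implied by the `sas->rxq_count = sas->ethdev_rxq_count;` assignment later in this patch (only ethdev Rx queues exist so far):

	/*
	 * Sketch of the queue index types and mappings this patch relies on.
	 * Assumption: with no driver-internal Rx queues yet, the mapping is
	 * the identity in both directions.
	 */
	typedef int32_t sfc_ethdev_qid_t;
	typedef int sfc_sw_index_t;

	#define SFC_ETHDEV_QID_INVALID	((sfc_ethdev_qid_t)(-1))

	static inline sfc_sw_index_t
	sfc_rxq_sw_index_by_ethdev_rx_qid(struct sfc_adapter_shared *sas,
					  sfc_ethdev_qid_t ethdev_qid)
	{
		/* Only ethdev queues are present for now */
		return ethdev_qid;
	}

	static inline sfc_ethdev_qid_t
	sfc_ethdev_rx_qid_by_rxq_sw_index(struct sfc_adapter_shared *sas,
					  sfc_sw_index_t rxq_sw_index)
	{
		/* Inverse of the identity mapping above */
		return rxq_sw_index;
	}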
struct sfc_dp_rxq *dp_rxq = queue;
struct sfc_rxq *rxq;
struct sfc_adapter *sa;
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
if (dp_rxq == NULL)
return;
* use any process-local pointers from the adapter data.
*/
static void
-sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
struct rte_eth_rxq_info *qinfo)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(rx_queue_id < sas->rxq_count);
-
- rxq_info = &sas->rxq_info[rx_queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
qinfo->mp = rxq_info->refill_mb_pool;
qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
* use any process-local pointers from the adapter data.
*/
static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(rx_queue_id < sas->rxq_count);
- rxq_info = &sas->rxq_info[rx_queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
return 0;
}
static int
-sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
int rc;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+ sfc_log_init(sa, "RxQ=%u", ethdev_qid);
sfc_adapter_lock(sa);
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
- if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ if (rxq_info->state != SFC_RXQ_INITIALIZED)
goto fail_not_setup;
- rc = sfc_rx_qstart(sa, rx_queue_id);
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ rc = sfc_rx_qstart(sa, sw_index);
if (rc != 0)
goto fail_rx_qstart;
- sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+ rxq_info->deferred_started = B_TRUE;
sfc_adapter_unlock(sa);
}
static int
-sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+ struct sfc_rxq_info *rxq_info;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+ sfc_log_init(sa, "RxQ=%u", ethdev_qid);
sfc_adapter_lock(sa);
- sfc_rx_qstop(sa, rx_queue_id);
- sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+ sfc_rx_qstop(sa, sw_index);
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+ rxq_info->deferred_started = B_FALSE;
sfc_adapter_unlock(sa);
}
static int
-sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(queue_id < sas->rxq_count);
- rxq_info = &sas->rxq_info[queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
return sap->dp_rx->intr_enable(rxq_info->dp);
}
static int
-sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(queue_id < sas->rxq_count);
- rxq_info = &sas->rxq_info[queue_id];
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
return sap->dp_rx->intr_disable(rxq_info->dp);
}
};
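Every ethdev callback above follows the same two-step shape: keep the uint16_t queue ID from the ethdev API, widen it once into sfc_ethdev_qid_t, and translate to a software index only where per-queue state is touched. A condensed illustration (the callback name is hypothetical, not part of the patch):

	/*
	 * Hypothetical callback showing the common pattern: widen the
	 * ethdev queue ID once, then translate at the lookup boundary.
	 */
	static uint32_t
	sfc_rx_queue_state_get(struct rte_eth_dev *dev, uint16_t ethdev_qid)
	{
		struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
		sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
		struct sfc_rxq_info *rxq_info;

		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
		return rxq_info->state;
	}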
static void
-sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qflush(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
unsigned int retry_count;
unsigned int wait_count;
int rc;
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
(wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
if (rxq_info->state & SFC_RXQ_FLUSHING)
- sfc_err(sa, "RxQ %u flush timed out", sw_index);
+ sfc_err(sa, "RxQ %d (internal %u) flush timed out",
+ ethdev_qid, sw_index);
if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
- sfc_err(sa, "RxQ %u flush failed", sw_index);
+ sfc_err(sa, "RxQ %d (internal %u) flush failed",
+ ethdev_qid, sw_index);
if (rxq_info->state & SFC_RXQ_FLUSHED)
- sfc_notice(sa, "RxQ %u flushed", sw_index);
+ sfc_notice(sa, "RxQ %d (internal %u) flushed",
+ ethdev_qid, sw_index);
}
sa->priv.dp_rx->qpurge(rxq_info->dp);
}
int
-sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
struct sfc_evq *evq;
efx_rx_prefix_layout_t pinfo;
int rc;
- sfc_log_init(sa, "sw_index=%u", sw_index);
-
SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
rxq = &sa->rxq_ctrl[sw_index];
evq = rxq->evq;
- rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
rxq_info->state |= SFC_RXQ_STARTED;
- if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
+ if (ethdev_qid == 0 && !sfc_sa2shared(sa)->isolated) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
if (rc != 0)
goto fail_mac_filter_default_rxq_set;
}
/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
- sa->eth_dev->data->rx_queue_state[sw_index] =
- RTE_ETH_QUEUE_STATE_STARTED;
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+ RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
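The rename from sfc_evq_index_by_rxq_sw_index to sfc_evq_sw_index_by_rxq_sw_index is mechanical. Assuming the driver keeps its existing convention that event queue 0 is the management EVQ, the helper remains a simple offset (sketch, not shown in this hunk):

	/*
	 * Sketch under the assumption that EVQ 0 stays the management
	 * event queue, so Rx queue N pairs with event queue N + 1.
	 */
	static inline sfc_sw_index_t
	sfc_evq_sw_index_by_rxq_sw_index(struct sfc_adapter *sa,
					 sfc_sw_index_t rxq_sw_index)
	{
		return 1 + rxq_sw_index;
	}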
void
-sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
- sfc_log_init(sa, "sw_index=%u", sw_index);
-
SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
- sa->eth_dev->data->rx_queue_state[sw_index] =
- RTE_ETH_QUEUE_STATE_STOPPED;
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
rxq = &sa->rxq_ctrl[sw_index];
sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
- if (sw_index == 0)
+ if (ethdev_qid == 0)
efx_mac_filter_default_rxq_clear(sa->nic);
sfc_rx_qflush(sa, sw_index);
}
int
-sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
int rc;
SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
- offloads = rx_conf->offloads |
- sa->eth_dev->data->dev_conf.rxmode.offloads;
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ offloads = rx_conf->offloads;
+ /* Add device level Rx offloads if the queue is an ethdev Rx queue */
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ offloads |= sa->eth_dev->data->dev_conf.rxmode.offloads;
+
rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
if (buf_size == 0) {
- sfc_err(sa, "RxQ %u mbuf pool object size is too small",
- sw_index);
+ sfc_err(sa,
+ "RxQ %d (internal %u) mbuf pool object size is too small",
+ ethdev_qid, sw_index);
rc = EINVAL;
goto fail_bad_conf;
}
(offloads & DEV_RX_OFFLOAD_SCATTER),
encp->enc_rx_scatter_max,
&error)) {
- sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
- sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+ sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
+ ethdev_qid, sw_index, error);
+ sfc_err(sa,
+ "RxQ %d (internal %u) calculated Rx buffer size is %u vs "
"PDU size %u plus Rx prefix %u bytes",
- sw_index, buf_size, (unsigned int)sa->port.pdu,
- encp->enc_rx_prefix_size);
+ ethdev_qid, sw_index, buf_size,
+ (unsigned int)sa->port.pdu, encp->enc_rx_prefix_size);
rc = EINVAL;
goto fail_bad_conf;
}
info.flags = rxq_info->rxq_flags;
info.rxq_entries = rxq_info->entries;
info.rxq_hw_ring = rxq->mem.esm_base;
- info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
+ info.evq_hw_index = sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index);
info.evq_entries = evq_entries;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = rxq->hw_index;
}
void
-sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
- sa->eth_dev->data->rx_queues[sw_index] = NULL;
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ sa->eth_dev->data->rx_queues[ethdev_qid] = NULL;
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
return rc;
}
+struct sfc_rxq_info *
+sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+ sfc_ethdev_qid_t ethdev_qid)
+{
+ sfc_sw_index_t sw_index;
+
+ SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+ SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+ return &sas->rxq_info[sw_index];
+}
+
+struct sfc_rxq *
+sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
+{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_sw_index_t sw_index;
+
+ SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+ SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+ return &sa->rxq_ctrl[sw_index];
+}
+
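The two new accessors pair up so that a control-path caller holding only the ethdev queue ID never handles software indices directly. A hypothetical caller (the function itself is illustrative, not part of the patch):

	/* Hypothetical: log a queue's geometry given only its ethdev ID */
	static void
	sfc_rx_qid_log(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
	{
		struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
		struct sfc_rxq_info *rxq_info;
		struct sfc_rxq *rxq;

		/* Both helpers assert that ethdev_qid is a valid ethdev queue */
		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		sfc_notice(sa, "RxQ %d: %u entries, hw_index %u",
			   ethdev_qid, rxq_info->entries, rxq->hw_index);
	}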
int
sfc_rx_start(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
int rc;
- sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+ sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+ sas->rxq_count);
rc = efx_rx_init(sa->nic);
if (rc != 0)
sfc_rx_stop(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+ sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+ sas->rxq_count);
sw_index = sas->rxq_count;
while (sw_index-- > 0) {
}
static int
-sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- int sw_index;
+ sfc_sw_index_t sw_index;
+ sfc_ethdev_qid_t ethdev_qid;
- SFC_ASSERT(nb_rx_queues <= sas->rxq_count);
+ SFC_ASSERT(nb_rx_queues <= sas->ethdev_rxq_count);
- sw_index = sas->rxq_count;
- while (--sw_index >= (int)nb_rx_queues) {
- if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
+ /*
+ * Finalize only ethdev queues since other ones are finalized only
+ * on device close and they may require additional deinitialization.
+ */
+ ethdev_qid = sas->ethdev_rxq_count;
+ while (--ethdev_qid >= (int)nb_rx_queues) {
+ struct sfc_rxq_info *rxq_info;
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
+ if (rxq_info->state & SFC_RXQ_INITIALIZED) {
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+ ethdev_qid);
sfc_rx_qfini(sa, sw_index);
+ }
}
- sas->rxq_count = nb_rx_queues;
+ sas->ethdev_rxq_count = nb_rx_queues;
}
/**
int rc;
sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
- nb_rx_queues, sas->rxq_count);
+ nb_rx_queues, sas->ethdev_rxq_count);
rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
if (rc != 0)
struct sfc_rxq_info *new_rxq_info;
struct sfc_rxq *new_rxq_ctrl;
- if (nb_rx_queues < sas->rxq_count)
+ if (nb_rx_queues < sas->ethdev_rxq_count)
sfc_rx_fini_queues(sa, nb_rx_queues);
rc = ENOMEM;
sas->rxq_info = new_rxq_info;
sa->rxq_ctrl = new_rxq_ctrl;
if (nb_rx_queues > sas->rxq_count) {
- memset(&sas->rxq_info[sas->rxq_count], 0,
- (nb_rx_queues - sas->rxq_count) *
+ unsigned int rxq_count = sas->rxq_count;
+
+ memset(&sas->rxq_info[rxq_count], 0,
+ (nb_rx_queues - rxq_count) *
sizeof(sas->rxq_info[0]));
- memset(&sa->rxq_ctrl[sas->rxq_count], 0,
- (nb_rx_queues - sas->rxq_count) *
+ memset(&sa->rxq_ctrl[rxq_count], 0,
+ (nb_rx_queues - rxq_count) *
sizeof(sa->rxq_ctrl[0]));
}
}
- while (sas->rxq_count < nb_rx_queues) {
- rc = sfc_rx_qinit_info(sa, sas->rxq_count);
+ while (sas->ethdev_rxq_count < nb_rx_queues) {
+ sfc_sw_index_t sw_index;
+
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+ sas->ethdev_rxq_count);
+ rc = sfc_rx_qinit_info(sa, sw_index);
if (rc != 0)
goto fail_rx_qinit_info;
- sas->rxq_count++;
+ sas->ethdev_rxq_count++;
}
+ sas->rxq_count = sas->ethdev_rxq_count;
+
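+	/*
+	 * Note: with only ethdev queues present the two counts coincide.
+	 * Once driver-internal Rx queues are added, rxq_count is expected
+	 * to exceed ethdev_rxq_count, conceptually (hypothetical):
+	 *     sas->rxq_count = sas->ethdev_rxq_count + <nb internal RxQs>;
+	 */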
configure_rss:
rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sas->rxq_count, EFX_MAXRSS) : 0;
+ MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
if (rss->channels > 0) {
struct rte_eth_rss_conf *adv_conf_rss;
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
rss->tbl[sw_index] = sw_index % rss->channels;