rxd = &rxq->sw_desc[id];
rxd->mbuf = m;
- rte_mbuf_refcnt_set(m, 1);
+ SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
m->data_off = RTE_PKTMBUF_HEADROOM;
- m->next = NULL;
- m->nb_segs = 1;
+ SFC_ASSERT(m->next == NULL);
+ SFC_ASSERT(m->nb_segs == 1);
m->port = port_id;
- addr[i] = rte_pktmbuf_mtophys(m);
+ addr[i] = rte_pktmbuf_iova(m);
}
efx_rx_qpost(rxq->common, addr, rxq->buf_size,
return ptypes;
}
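
For context, the refill hunk above stops re-writing mbuf fields in the hot loop and merely asserts them, and it switches from the physical-address helper to rte_pktmbuf_iova(). Below is a minimal sketch, not part of the patch (the helper name check_fresh_mbuf_invariants is made up), of why the asserted invariants already hold for mbufs obtained from a packet mempool:

#include <assert.h>
#include <rte_mbuf.h>

/* Illustrative check: a freshly allocated packet mbuf already satisfies
 * the invariants asserted in the refill path, so the explicit writes can
 * be dropped from the hot loop.
 */
static void
check_fresh_mbuf_invariants(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m == NULL)
		return;

	assert(rte_mbuf_refcnt_read(m) == 1);
	assert(m->next == NULL);
	assert(m->nb_segs == 1);

	/* The data IOVA that would be posted to the Rx descriptor */
	(void)rte_pktmbuf_iova(m);

	rte_pktmbuf_free(m);
}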
+#if EFSYS_OPT_RX_SCALE
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
struct rte_mbuf *m)
{
-#if EFSYS_OPT_RX_SCALE
uint8_t *mbuf_data;
m->ol_flags |= PKT_RX_RSS_HASH;
}
-#endif
}
+#else
+static void
+sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
+ __rte_unused unsigned int flags,
+ __rte_unused struct rte_mbuf *m)
+{
+}
+#endif
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (scatter_pkt != NULL) {
if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
- rte_mempool_put(rxq->refill_mb_pool,
- scatter_pkt);
+ rte_pktmbuf_free(scatter_pkt);
goto discard;
}
/* The packet to deliver */
if (desc_flags & EFX_PKT_CONT) {
/* The packet is scattered, more fragments to come */
scatter_pkt = m;
- /* Futher fragments have no prefix */
+ /* Further fragments have no prefix */
prefix_size = 0;
continue;
}
return rxq->pending - rxq->completed;
}
+static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
+static int
+sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if (unlikely(offset > rxq->ptr_mask))
+ return -EINVAL;
+
+ /*
+ * Poll the EvQ to derive an up-to-date 'rxq->pending' figure;
+ * the queue is required to be running, but the check is omitted
+ * because the API design assumes that it is the caller's duty
+ * to satisfy all conditions
+ */
+ SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
+ SFC_EFX_RXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(rxq->evq);
+
+ /*
+ * There is a handful of reserved entries in the ring, but an
+ * explicit check whether the offset points to a reserved entry is
+ * omitted: the two checks below rely on figures which already take
+ * the HW limits into account, so if an entry is reserved, both
+ * checks fail and the UNAVAIL code is returned
+ */
+
+ if (offset < (rxq->pending - rxq->completed))
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed))
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
+
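The new qdesc_status callback is what backs the generic rte_eth_rx_descriptor_status() ethdev call registered below. A minimal usage sketch (the helper rx_done_estimate is hypothetical, not part of the patch) showing how an application can turn the DONE/AVAIL/UNAVAIL codes into an estimate of completed descriptors:

#include <rte_ethdev.h>

/* Count descriptors that already hold received packets by probing
 * offsets until the first one that is not DONE.
 */
static uint16_t
rx_done_estimate(uint16_t port_id, uint16_t queue_id, uint16_t max_probe)
{
	uint16_t offset;

	for (offset = 0; offset < max_probe; ++offset) {
		int status = rte_eth_rx_descriptor_status(port_id, queue_id,
							  offset);

		if (status != RTE_ETH_RX_DESC_DONE)
			break;
	}

	return offset;
}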
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
.qpurge = sfc_efx_rx_qpurge,
.supported_ptypes_get = sfc_efx_supported_ptypes_get,
.qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .qdesc_status = sfc_efx_rx_qdesc_status,
.pkt_burst = sfc_efx_recv_pkts,
};
struct sfc_rxq *rxq;
unsigned int retry_count;
unsigned int wait_count;
+ int rc;
rxq = sa->rxq_info[sw_index].rxq;
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
(retry_count < SFC_RX_QFLUSH_ATTEMPTS);
++retry_count) {
- if (efx_rx_qflush(rxq->common) != 0) {
- rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rc = efx_rx_qflush(rxq->common);
+ if (rc != 0) {
+ rxq->state |= (rc == EALREADY) ?
+ SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
break;
}
rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
- boolean_t rss = (sa->rss_channels > 1) ? B_TRUE : B_FALSE;
+ boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
struct sfc_port *port = &sa->port;
int rc;
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
+ struct sfc_port *port = &sa->port;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
struct sfc_evq *evq;
evq = rxq->evq;
- rc = sfc_ev_qstart(sa, evq->evq_index);
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
rxq->state |= SFC_RXQ_STARTED;
- if (sw_index == 0) {
+ if ((sw_index == 0) && !port->isolated) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
if (rc != 0)
goto fail_mac_filter_default_rxq_set;
sfc_rx_qflush(sa, sw_index);
fail_rx_qcreate:
- sfc_ev_qstop(sa, evq->evq_index);
+ sfc_ev_qstop(evq);
fail_ev_qstart:
return rc;
efx_rx_qdestroy(rxq->common);
- sfc_ev_qstop(sa, rxq->evq->evq_index);
+ sfc_ev_qstop(rxq->evq);
}
static int
if (rx_conf->rx_thresh.pthresh != 0 ||
rx_conf->rx_thresh.hthresh != 0 ||
rx_conf->rx_thresh.wthresh != 0) {
- sfc_err(sa,
+ sfc_warn(sa,
"RxQ prefetch/host/writeback thresholds are not supported");
- rc = EINVAL;
}
if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
int rc;
uint16_t buf_size;
struct sfc_rxq_info *rxq_info;
- unsigned int evq_index;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;
- evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
-
- rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
+ rxq_info->entries, socket_id, &evq);
if (rc != 0)
goto fail_ev_qinit;
- evq = sa->evq_info[evq_index].evq;
-
rc = ENOMEM;
rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
socket_id);
info.prefix_size = encp->enc_rx_prefix_size;
#if EFSYS_OPT_RX_SCALE
- if (sa->hash_support == EFX_RX_HASH_AVAILABLE)
+ if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif
info.mem_bar = sa->mem_bar.esb_base;
rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
socket_id, &info, &rxq->dp);
if (rc != 0)
goto fail_dp_rx_qcreate;
rte_free(rxq);
fail_rxq_alloc:
- sfc_ev_qfini(sa, evq_index);
+ sfc_ev_qfini(evq);
fail_ev_qinit:
rxq_info->entries = 0;
rxq_info->entries = 0;
sfc_dma_free(sa, &rxq->mem);
- rte_free(rxq);
- sfc_ev_qfini(sa, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ sfc_ev_qfini(rxq->evq);
+ rxq->evq = NULL;
+
+ rte_free(rxq);
}
#if EFSYS_OPT_RX_SCALE
}
#endif
+#if EFSYS_OPT_RX_SCALE
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
int rc = 0;
-#if EFSYS_OPT_RX_SCALE
- if (sa->rss_channels > 1) {
- rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ if (sa->rss_channels > 0) {
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ EFX_RX_HASHALG_TOEPLITZ,
sa->rss_hash_types, B_TRUE);
if (rc != 0)
goto finish;
- rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
+ rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ sa->rss_key,
sizeof(sa->rss_key));
if (rc != 0)
goto finish;
- rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
- sizeof(sa->rss_tbl));
+ rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ sa->rss_tbl, RTE_DIM(sa->rss_tbl));
}
finish:
-#endif
return rc;
}
+#else
+static int
+sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
+{
+ return 0;
+}
+#endif
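
The driver-side changes above program the default RSS context only when sa->rss_channels is non-zero, which in turn reflects the ethdev-level multi-queue mode. A minimal application-side sketch (not part of the patch; hash types and the helper name configure_port_rss are illustrative) of the configuration that enables this path:

#include <rte_ethdev.h>

/* Enable RSS at the ethdev level so the PMD spreads traffic across the
 * configured Rx queues using its default Toeplitz key and table.
 */
static int
configure_port_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,  /* keep the PMD default key */
				.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}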
int
sfc_rx_start(struct sfc_adapter *sa)
return rc;
}
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (--sw_index >= (int)nb_rx_queues) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qfini(sa, sw_index);
+ }
+
+ sa->rxq_count = nb_rx_queues;
+}
+
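sfc_rx_fini_queues() above is what lets the adapter release only the excess queues when the queue count changes. A short sketch of the application-side sequence that exercises this path (error handling trimmed; the helper name change_rx_queue_count is illustrative, not part of the patch):

#include <rte_ethdev.h>

/* Reconfigure a stopped port with a new Rx queue count; with this patch
 * the PMD frees queues beyond the new count and keeps the remaining ones.
 */
static int
change_rx_queue_count(uint16_t port_id, const struct rte_eth_conf *conf,
		      uint16_t new_nb_rxq, uint16_t nb_txq)
{
	rte_eth_dev_stop(port_id);

	return rte_eth_dev_configure(port_id, new_nb_rxq, nb_txq, conf);
}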
/**
* Initialize Rx subsystem.
*
- * Called at device configuration stage when number of receive queues is
+ * Called at device (re)configuration stage when the number of receive queues is
* specified together with other device level receive configuration.
*
* It should be used to allocate NUMA-unaware resources.
*/
int
-sfc_rx_init(struct sfc_adapter *sa)
+sfc_rx_configure(struct sfc_adapter *sa)
{
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
- unsigned int sw_index;
+ const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
int rc;
+ sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
+ nb_rx_queues, sa->rxq_count);
+
rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
if (rc != 0)
goto fail_check_mode;
- sa->rxq_count = sa->eth_dev->data->nb_rx_queues;
+ if (nb_rx_queues == sa->rxq_count)
+ goto done;
- rc = ENOMEM;
- sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
- sizeof(struct sfc_rxq_info), 0,
- sa->socket_id);
- if (sa->rxq_info == NULL)
- goto fail_rxqs_alloc;
+ if (sa->rxq_info == NULL) {
+ rc = ENOMEM;
+ sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+ sizeof(sa->rxq_info[0]), 0,
+ sa->socket_id);
+ if (sa->rxq_info == NULL)
+ goto fail_rxqs_alloc;
+ } else {
+ struct sfc_rxq_info *new_rxq_info;
+
+ if (nb_rx_queues < sa->rxq_count)
+ sfc_rx_fini_queues(sa, nb_rx_queues);
+
+ rc = ENOMEM;
+ new_rxq_info =
+ rte_realloc(sa->rxq_info,
+ nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
+ if (new_rxq_info == NULL && nb_rx_queues > 0)
+ goto fail_rxqs_realloc;
+
+ sa->rxq_info = new_rxq_info;
+ if (nb_rx_queues > sa->rxq_count)
+ memset(&sa->rxq_info[sa->rxq_count], 0,
+ (nb_rx_queues - sa->rxq_count) *
+ sizeof(sa->rxq_info[0]));
+ }
- for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
- rc = sfc_rx_qinit_info(sa, sw_index);
+ while (sa->rxq_count < nb_rx_queues) {
+ rc = sfc_rx_qinit_info(sa, sa->rxq_count);
if (rc != 0)
goto fail_rx_qinit_info;
+
+ sa->rxq_count++;
}
#if EFSYS_OPT_RX_SCALE
sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sa->rxq_count, EFX_MAXRSS) : 1;
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+
+ if (sa->rss_channels > 0) {
+ unsigned int sw_index;
- if (sa->rss_channels > 1) {
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
}
#endif
+done:
return 0;
fail_rx_qinit_info:
- rte_free(sa->rxq_info);
- sa->rxq_info = NULL;
-
+fail_rxqs_realloc:
fail_rxqs_alloc:
- sa->rxq_count = 0;
+ sfc_rx_close(sa);
+
fail_check_mode:
sfc_log_init(sa, "failed %d", rc);
return rc;
/**
* Shutdown Rx subsystem.
*
- * Called at device close stage, for example, before device
- * reconfiguration or shutdown.
+ * Called at device close stage, for example, before device shutdown.
*/
void
-sfc_rx_fini(struct sfc_adapter *sa)
+sfc_rx_close(struct sfc_adapter *sa)
{
- unsigned int sw_index;
+ sfc_rx_fini_queues(sa, 0);
- sw_index = sa->rxq_count;
- while (sw_index-- > 0) {
- if (sa->rxq_info[sw_index].rxq != NULL)
- sfc_rx_qfini(sa, sw_index);
- }
+ sa->rss_channels = 0;
rte_free(sa->rxq_info);
sa->rxq_info = NULL;
- sa->rxq_count = 0;
}