rxd = &rxq->sw_desc[id];
rxd->mbuf = m;
- rte_mbuf_refcnt_set(m, 1);
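+ /* Mbufs from the pool are expected to arrive with reference count 1 */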
+ SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
m->data_off = RTE_PKTMBUF_HEADROOM;
- m->next = NULL;
- m->nb_segs = 1;
+ SFC_ASSERT(m->next == NULL);
+ SFC_ASSERT(m->nb_segs == 1);
m->port = port_id;
- addr[i] = rte_pktmbuf_mtophys(m);
+ addr[i] = rte_pktmbuf_iova(m);
}
efx_rx_qpost(rxq->common, addr, rxq->buf_size,
return ptypes;
}
+#if EFSYS_OPT_RX_SCALE
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
struct rte_mbuf *m)
{
-#if EFSYS_OPT_RX_SCALE
uint8_t *mbuf_data;
m->ol_flags |= PKT_RX_RSS_HASH;
}
-#endif
}
+#else
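+/* No-op stub used when RX scaling support is not compiled in */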
+static void
+sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
+ __rte_unused unsigned int flags,
+ __rte_unused struct rte_mbuf *m)
+{
+}
+#endif
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (desc_flags & EFX_PKT_CONT) {
/* The packet is scattered, more fragments to come */
scatter_pkt = m;
- /* Futher fragments have no prefix */
+ /* Further fragments have no prefix */
prefix_size = 0;
continue;
}
return rxq->pending - rxq->completed;
}
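+/* Report the state of the Rx descriptor at the given queue offset */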
+static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
+static int
+sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if (unlikely(offset > rxq->ptr_mask))
+ return -EINVAL;
+
+ /*
+ * Poll the EvQ to get an up-to-date 'rxq->pending' count;
+ * the queue is required to be running, but the check is
+ * omitted because the API design places the duty of
+ * satisfying all preconditions on the caller
+ */
+ SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
+ SFC_EFX_RXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(rxq->evq);
+
+ /*
+ * There is a handful of reserved entries in the ring,
+ * but no explicit check that the offset points to a
+ * reserved entry is made: the two checks below rely on
+ * counters which already take the HW limits into
+ * account, so for a reserved entry both checks fail
+ * and the UNAVAIL code is returned
+ */
+
+ if (offset < (rxq->pending - rxq->completed))
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed))
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
+
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
.qpurge = sfc_efx_rx_qpurge,
.supported_ptypes_get = sfc_efx_supported_ptypes_get,
.qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .qdesc_status = sfc_efx_rx_qdesc_status,
.pkt_burst = sfc_efx_recv_pkts,
};
struct sfc_rxq *rxq;
unsigned int retry_count;
unsigned int wait_count;
+ int rc;
rxq = sa->rxq_info[sw_index].rxq;
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
(retry_count < SFC_RX_QFLUSH_ATTEMPTS);
++retry_count) {
- if (efx_rx_qflush(rxq->common) != 0) {
- rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rc = efx_rx_qflush(rxq->common);
+ if (rc != 0) {
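+ /* EALREADY means the queue is already flushed */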
+ rxq->state |= (rc == EALREADY) ?
+ SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
break;
}
rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
- boolean_t rss = (sa->rss_channels > 1) ? B_TRUE : B_FALSE;
+ boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
struct sfc_port *port = &sa->port;
int rc;
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
+ struct sfc_port *port = &sa->port;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
struct sfc_evq *evq;
rxq->state |= SFC_RXQ_STARTED;
- if (sw_index == 0) {
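+ /* The default RxQ filter is not installed in isolated mode */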
+ if ((sw_index == 0) && !port->isolated) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
if (rc != 0)
goto fail_mac_filter_default_rxq_set;
if (rx_conf->rx_thresh.pthresh != 0 ||
rx_conf->rx_thresh.hthresh != 0 ||
rx_conf->rx_thresh.wthresh != 0) {
- sfc_err(sa,
+ sfc_warn(sa,
"RxQ prefetch/host/writeback thresholds are not supported");
- rc = EINVAL;
}
if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
info.prefix_size = encp->enc_rx_prefix_size;
#if EFSYS_OPT_RX_SCALE
- if (sa->hash_support == EFX_RX_HASH_AVAILABLE)
+ if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif
info.mem_bar = sa->mem_bar.esb_base;
rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
socket_id, &info, &rxq->dp);
if (rc != 0)
goto fail_dp_rx_qcreate;
}
#endif
+#if EFSYS_OPT_RX_SCALE
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
int rc = 0;
-#if EFSYS_OPT_RX_SCALE
- if (sa->rss_channels > 1) {
- rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ if (sa->rss_channels > 0) {
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ EFX_RX_HASHALG_TOEPLITZ,
sa->rss_hash_types, B_TRUE);
if (rc != 0)
goto finish;
- rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
+ rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ sa->rss_key,
sizeof(sa->rss_key));
if (rc != 0)
goto finish;
- rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
- sizeof(sa->rss_tbl));
+ rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ sa->rss_tbl, RTE_DIM(sa->rss_tbl));
}
finish:
-#endif
return rc;
}
+#else
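+/* RSS configuration is a no-op when RX scaling support is not compiled in */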
+static int
+sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
+{
+ return 0;
+}
+#endif
int
sfc_rx_start(struct sfc_adapter *sa)
{
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
- unsigned int sw_index;
int rc;
sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
#if EFSYS_OPT_RX_SCALE
sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sa->rxq_count, EFX_MAXRSS) : 1;
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+
+ if (sa->rss_channels > 0) {
+ unsigned int sw_index;
- if (sa->rss_channels > 1) {
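+ /* Fill the RSS redirection table with queue indices in round-robin order */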
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
}
{
sfc_rx_fini_queues(sa, 0);
+ sa->rss_channels = 0;
+
rte_free(sa->rxq_info);
sa->rxq_info = NULL;
}