SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
eth_dev = &rte_eth_devices[dpq->port_id];
- sa = eth_dev->data->dev_private;
+ sa = sfc_adapter_by_eth_dev(eth_dev);
SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
return &sa->rxq_ctrl[dpq->queue_id];
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct sfc_dp_rx_hw_limits *limits,
__rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
struct sfc_port *port = &sa->port;
int rc;
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
- struct sfc_port *port = &sa->port;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
struct sfc_evq *evq;
switch (rxq_info->type) {
case EFX_RXQ_TYPE_DEFAULT:
rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ rxq->buf_size,
&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
rxq_info->type_flags, evq->common, &rxq->common);
break;
rxq_info->state |= SFC_RXQ_STARTED;
- if ((sw_index == 0) && !port->isolated) {
+ if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
if (rc != 0)
goto fail_mac_filter_default_rxq_set;
struct rte_mempool *mb_pool)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
int rc;
unsigned int rxq_entries;
unsigned int evq_entries;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
+ struct sfc_dp_rx_hw_limits hw_limits;
+
+ memset(&hw_limits, 0, sizeof(hw_limits));
+ hw_limits.rxq_max_entries = sa->rxq_max_entries;
+ hw_limits.rxq_min_entries = sa->rxq_min_entries;
+ hw_limits.evq_max_entries = sa->evq_max_entries;
+ hw_limits.evq_min_entries = sa->evq_min_entries;
- rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
- &evq_entries, &rxq_max_fill_level);
+ rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
+ &rxq_entries, &evq_entries,
+ &rxq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
- SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
- SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
+ SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
+ SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
offloads = rx_conf->offloads |
rxq_info->refill_mb_pool = mb_pool;
rxq->buf_size = buf_size;
- rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
+ rc = sfc_dma_alloc(sa, "rxq", sw_index,
+ efx_rxq_size(sa->nic, rxq_info->entries),
socket_id, &rxq->mem);
if (rc != 0)
goto fail_dma_alloc;
int
sfc_rx_hash_init(struct sfc_adapter *sa)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
efx_rx_hash_alg_t alg;
/*
 * Tear down RSS hash state: frees the RTE<->EFX hash-field map
 * (rss->hf_map) presumably allocated by sfc_rx_hash_init() — the
 * allocation itself is outside this hunk, so confirm against that
 * function. free(NULL) semantics of rte_free() make this safe even
 * if init failed early.
 *
 * NOTE(review): the '-'/'+' lines below are unified-diff markers from
 * the patch this chunk was taken from; the change replaces the direct
 * &sa->rss access with the shared-adapter accessor sfc_sa2shared(sa),
 * consistent with the same substitution made throughout this patch.
 */
void
sfc_rx_hash_fini(struct sfc_adapter *sa)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	rte_free(rss->hf_map);
}
sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
efx_rx_hash_type_t *efx)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
efx_rx_hash_type_t hash_types = 0;
unsigned int i;
}
uint64_t
-sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
+sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx)
{
- struct sfc_rss *rss = &sa->rss;
uint64_t rte = 0;
unsigned int i;
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
struct rte_eth_rss_conf *conf)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
efx_rx_hash_type_t efx_hash_types = rss->hash_types;
- uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);
+ uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types);
int rc;
if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
int rc = 0;
if (rss->channels > 0) {
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
unsigned int max_entries;
- max_entries = EFX_RXQ_MAXNDESCS;
+ max_entries = encp->enc_rxq_max_ndescs;
SFC_ASSERT(rte_is_power_of_2(max_entries));
rxq_info->max_entries = max_entries;
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
sfc_rx_get_queue_offload_caps(sa);
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sas->rss;
int rc = 0;
switch (rxmode->mq_mode) {
sfc_rx_configure(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sas->rss;
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
int rc;
void
sfc_rx_close(struct sfc_adapter *sa)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
sfc_rx_fini_queues(sa, 0);