encp = efx_nic_cfg_get(sa->nic);
- if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
+ if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO) {
sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
if (!sa->tso)
sfc_warn(sa,
uint8_t key[EFX_RSS_KEY_SIZE];
};
+/* Adapter process private data */
+struct sfc_adapter_priv {
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+};
+
+static inline struct sfc_adapter_priv *
+sfc_adapter_priv_by_eth_dev(struct rte_eth_dev *eth_dev)
+{
+ struct sfc_adapter_priv *sap = eth_dev->process_private;
+
+ SFC_ASSERT(sap != NULL);
+ return sap;
+}
+
/* Adapter private data */
struct sfc_adapter {
+ /*
+ * It must be the first field of the sfc_adapter structure since
+ * sfc_adapter is the primary process private data (i.e. process
+ * private data plus additional primary process specific data).
+ */
+ struct sfc_adapter_priv priv;
+
/*
* PMD setup and configuration is not thread safe. Since it is not
* performance sensitive, it is better to guarantee thread-safety
* the secondary process to find Rx datapath to be used.
*/
char *dp_rx_name;
- const struct sfc_dp_rx *dp_rx;
/*
* Shared memory copy of the Tx datapath name to be used by
* the secondary process to find Tx datapath to be used.
*/
char *dp_tx_name;
- const struct sfc_dp_tx *dp_tx;
};
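The lookup helper above returns dev->process_private regardless of process type, so it is only correct while the 'priv' member really is at offset zero of struct sfc_adapter, as the comment in the structure requires. A minimal compile-time guard for that assumption could look as follows (hypothetical sketch, not part of the patch; assumes a C11 _Static_assert is available):

#include <stddef.h>

/*
 * Hypothetical guard: sfc_adapter_priv_by_eth_dev() treats a pointer to
 * the whole adapter (primary process) and a pointer to a standalone
 * sfc_adapter_priv (secondary process) interchangeably, which holds only
 * while 'priv' is the first member of struct sfc_adapter.
 */
_Static_assert(offsetof(struct sfc_adapter, priv) == 0,
	       "priv must be the first member of struct sfc_adapter");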
static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter *sa = dev->data->dev_private;
struct sfc_rss *rss = &sa->rss;
uint64_t txq_offloads_def = 0;
dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
- if (sa->dp_rx->get_dev_info != NULL)
- sa->dp_rx->get_dev_info(dev_info);
- if (sa->dp_tx->get_dev_info != NULL)
- sa->dp_tx->get_dev_info(dev_info);
+ if (sap->dp_rx->get_dev_info != NULL)
+ sap->dp_rx->get_dev_info(dev_info);
+ if (sap->dp_tx->get_dev_info != NULL)
+ sap->dp_tx->get_dev_info(dev_info);
dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter *sa = dev->data->dev_private;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
- return sa->dp_rx->supported_ptypes_get(tunnel_encaps);
+ return sap->dp_rx->supported_ptypes_get(tunnel_encaps);
}
static int
static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
struct sfc_adapter *sa = dev->data->dev_private;
struct sfc_rxq *rxq;
if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
return 0;
- return sa->dp_rx->qdesc_npending(rxq->dp);
+ return sap->dp_rx->qdesc_npending(rxq->dp);
}
static int
struct sfc_dp_rxq *dp_rxq = queue;
struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
- return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
+ return offset < rxq->evq->sa->priv.dp_rx->qdesc_npending(dp_rxq);
}
static int
struct sfc_dp_rxq *dp_rxq = queue;
struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
- return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset);
+ return rxq->evq->sa->priv.dp_rx->qdesc_status(dp_rxq, offset);
}
static int
struct sfc_dp_txq *dp_txq = queue;
struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq);
- return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset);
+ return txq->evq->sa->priv.dp_tx->qdesc_status(dp_txq, offset);
}
static int
static int
sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
/*
* If Rx datapath does not provide callback to check mempool,
* all pools are supported.
*/
- if (sa->dp_rx->pool_ops_supported == NULL)
+ if (sap->dp_rx->pool_ops_supported == NULL)
return 1;
- return sa->dp_rx->pool_ops_supported(pool);
+ return sap->dp_rx->pool_ops_supported(pool);
}
static const struct eth_dev_ops sfc_eth_dev_ops = {
sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name);
- sa->dp_rx = dp_rx;
- sa->dp_tx = dp_tx;
+ sa->priv.dp_rx = dp_rx;
+ sa->priv.dp_tx = dp_tx;
dev->rx_pkt_burst = dp_rx->pkt_burst;
dev->tx_pkt_burst = dp_tx->pkt_burst;
rte_free(sa->dp_tx_name);
sa->dp_tx_name = NULL;
- sa->dp_tx = NULL;
+ sa->priv.dp_tx = NULL;
rte_free(sa->dp_rx_name);
sa->dp_rx_name = NULL;
- sa->dp_rx = NULL;
+ sa->priv.dp_rx = NULL;
}
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
};
static int
-sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev, uint32_t logtype_main)
+sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
{
/*
* Device private data has really many process-local pointers.
* in shared memory only.
*/
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter_priv *sap;
const struct sfc_dp_rx *dp_rx;
const struct sfc_dp_tx *dp_tx;
int rc;
+ /*
+ * Allocate process private data from heap, since it should not
+ * be located in shared memory allocated using rte_malloc() API.
+ */
+ sap = calloc(1, sizeof(*sap));
+ if (sap == NULL) {
+ rc = ENOMEM;
+ goto fail_alloc_priv;
+ }
+
dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
if (dp_rx == NULL) {
SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
goto fail_dp_tx_multi_process;
}
+ sap->dp_rx = dp_rx;
+ sap->dp_tx = dp_tx;
+
+ dev->process_private = sap;
dev->rx_pkt_burst = dp_rx->pkt_burst;
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
fail_dp_tx:
fail_dp_rx_multi_process:
fail_dp_rx:
+ free(sap);
+
+fail_alloc_priv:
return rc;
}
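The calloc() call above follows the comment on allocation: memory obtained through rte_malloc() comes from hugepage segments mapped into every process, so an object that holds process-local dp_rx/dp_tx pointers must come from the per-process heap instead. A minimal sketch of that rule (hypothetical helper, not part of the patch):

#include <stdlib.h>

/*
 * Hypothetical helper restating the allocation rule used above: process
 * private data is taken from the local heap with calloc() and released
 * with free() (see sfc_eth_dev_secondary_clear_ops() below), never with
 * rte_malloc()/rte_free(), whose memory is shared with the primary
 * process.
 */
static struct sfc_adapter_priv *
sfc_adapter_priv_alloc_sketch(void)
{
	return calloc(1, sizeof(struct sfc_adapter_priv));
}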
static void
sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
{
+ free(dev->process_private);
+ dev->process_private = NULL;
dev->dev_ops = NULL;
dev->tx_pkt_burst = NULL;
dev->rx_pkt_burst = NULL;
RTE_LOG_NOTICE);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -sfc_eth_dev_secondary_set_ops(dev, logtype_main);
+ return -sfc_eth_dev_secondary_init(dev, logtype_main);
+
+ /*
+ * sfc_adapter is a mixture of shared and process private data.
+ * During transition period use it in both kinds. When the
+ * driver becomes ready to separate it, sfc_adapter will become
+ * primary process private only.
+ */
+ dev->process_private = sa;
/* Required for logging */
sa->pci_addr = pci_dev->addr;
fail_kvargs_parse:
sfc_log_init(sa, "failed %d", rc);
+ dev->process_private = NULL;
SFC_ASSERT(rc > 0);
return -rc;
}
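Assigning dev->process_private = sa in the primary process is what lets the same lookup helper serve both process types during the transition period described in the comment above. A hypothetical check of that equivalence (illustration only, not part of the patch):

#include <rte_eal.h>

/*
 * Hypothetical illustration: in the primary process the generic lookup
 * resolves to the 'priv' member embedded at offset 0 of the adapter,
 * while a secondary process gets the standalone object allocated in
 * sfc_eth_dev_secondary_init().
 */
static void
sfc_priv_lookup_example(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		SFC_ASSERT(sap == &sa->priv);
}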
dp_rxq = evq->dp_rxq;
SFC_ASSERT(dp_rxq != NULL);
- SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
- return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
+ SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
+ return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id);
}
static boolean_t
dp_rxq = evq->dp_rxq;
SFC_ASSERT(dp_rxq != NULL);
- if (evq->sa->dp_rx->qrx_ps_ev != NULL)
- return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
+ if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL)
+ return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id);
else
return B_FALSE;
}
dp_txq = evq->dp_txq;
SFC_ASSERT(dp_txq != NULL);
- SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
- return evq->sa->dp_tx->qtx_ev(dp_txq, id);
+ SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
+ return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id);
}
static boolean_t
SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
if (evq->dp_rxq != 0) {
- if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ if (strcmp(sa->priv.dp_rx->dp.name,
+ SFC_KVARG_DATAPATH_EFX) == 0)
evq->callbacks = &sfc_ev_callbacks_efx_rx;
else
evq->callbacks = &sfc_ev_callbacks_dp_rx;
} else if (evq->dp_txq != 0) {
- if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ if (strcmp(sa->priv.dp_tx->dp.name,
+ SFC_KVARG_DATAPATH_EFX) == 0)
evq->callbacks = &sfc_ev_callbacks_efx_tx;
else
evq->callbacks = &sfc_ev_callbacks_dp_tx;
struct rte_flow_error *error)
{
int rc;
- const unsigned int dp_rx_features = sa->dp_rx->features;
+ const unsigned int dp_rx_features = sa->priv.dp_rx->features;
uint32_t actions_set = 0;
const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
(1UL << RTE_FLOW_ACTION_TYPE_RSS) |
sfc_notice(sa, "RxQ %u flushed", sw_index);
}
- sa->dp_rx->qpurge(rxq->dp);
+ sa->priv.dp_rx->qpurge(rxq->dp);
}
static int
efx_rx_qenable(rxq->common);
- rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
+ rc = sa->priv.dp_rx->qstart(rxq->dp, evq->read_ptr);
if (rc != 0)
goto fail_dp_qstart;
return 0;
fail_mac_filter_default_rxq_set:
- sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+ sa->priv.dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
fail_dp_qstart:
sfc_rx_qflush(sa, sw_index);
sa->eth_dev->data->rx_queue_state[sw_index] =
RTE_ETH_QUEUE_STATE_STOPPED;
- sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+ sa->priv.dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
if (sw_index == 0)
efx_mac_filter_default_rxq_clear(sa->nic);
caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- if (sa->dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
+ if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
}
if (encp->enc_tunnel_encapsulations_supported &&
- (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
return caps;
{
uint64_t caps = 0;
- if (sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
+ if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
caps |= DEV_RX_OFFLOAD_SCATTER;
return caps;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
- rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
- &evq_entries, &rxq_max_fill_level);
+ rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
+ &evq_entries, &rxq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
rxq_info->entries = rxq_entries;
- if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
+ if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
else
rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
- (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
info.mem_bar = sa->mem_bar.esb_base;
info.vi_window_shift = encp->enc_vi_window_shift;
- rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
- socket_id, &info, &rxq->dp);
+ rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &rxq->dp);
if (rc != 0)
goto fail_dp_rx_qcreate;
rxq = rxq_info->rxq;
SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
- sa->dp_rx->qdestroy(rxq->dp);
+ sa->priv.dp_rx->qdestroy(rxq->dp);
rxq->dp = NULL;
rxq_info->rxq = NULL;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
uint64_t caps = 0;
- if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
+ if ((sa->priv.dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
encp->enc_hw_tx_insert_vlan_enabled)
caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
- if (sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
- if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
+ if ((~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
+ (~sa->priv.dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
return caps;
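A note on the '~features & FLAG' tests above: the expression is non-zero exactly when FLAG is absent from the feature mask, so DEV_TX_OFFLOAD_MBUF_FAST_FREE is advertised only when the datapath supports neither multi-mempool transmit nor mbuf reference counting. The same test written as a hypothetical helper (not part of the patch):

/*
 * Hypothetical helper equivalent to '(~features & feat) != 0' for a
 * single-bit flag: non-zero when the datapath does NOT announce the
 * feature.
 */
static inline int
sfc_dp_tx_feat_absent(unsigned int features, unsigned int feat)
{
	return (features & feat) == 0;
}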
sfc_log_init(sa, "TxQ = %u", sw_index);
- rc = sa->dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries, &evq_entries,
- &txq_max_fill_level);
+ rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries,
+ &evq_entries, &txq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
info.tso_tcp_header_offset_limit =
encp->enc_tx_tso_tcp_header_offset_limit;
- rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
- socket_id, &info, &txq->dp);
+ rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &txq->dp);
if (rc != 0)
goto fail_dp_tx_qinit;
SFC_ASSERT(txq != NULL);
SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
- sa->dp_tx->qdestroy(txq->dp);
+ sa->priv.dp_tx->qdestroy(txq->dp);
txq->dp = NULL;
txq_info->txq = NULL;
txq->state |= SFC_TXQ_STARTED;
- rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
+ rc = sa->priv.dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
if (rc != 0)
goto fail_dp_qstart;
SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
- sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
+ sa->priv.dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
/*
* Retry TX queue flushing in case of flush failed or
sfc_notice(sa, "TxQ %u flushed", sw_index);
}
- sa->dp_tx->qreap(txq->dp);
+ sa->priv.dp_tx->qreap(txq->dp);
txq->state = SFC_TXQ_INITIALIZED;