	/* Advertise ES super-buffer Rx capability when the NIC supports it */
	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_rx_es_super_buffer_supported)
		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;

	/* An explicit Rx datapath may be requested via device arguments */
	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
				sfc_kvarg_string_handler, &rx_name);
	if (rc != 0)
		goto fail_kvarg_rx_datapath;

	if (rx_name != NULL) {
		/* User's choice must exist and fit HW/FW capabilities */
		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath %s not found", rx_name);
			rc = ENOENT;
			goto fail_dp_rx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Rx datapath %s",
				rx_name);
			rc = EINVAL;
			goto fail_dp_rx_caps;
		}
	} else {
		/*
		 * No explicit choice: pick the first registered Rx datapath
		 * whose HW/FW requirements are satisfied.
		 */
		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_rx;
		}
	}

	/*
	 * Keep the chosen name in shared state so that secondary processes
	 * can resolve the same datapath by name (see
	 * sfc_eth_dev_secondary_init()).
	 */
	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
	if (sas->dp_rx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_rx_name;
	}

	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);

	/* Same selection procedure for the Tx datapath */
	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
				sfc_kvarg_string_handler, &tx_name);
	if (rc != 0)
		goto fail_kvarg_tx_datapath;

	if (tx_name != NULL) {
		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath %s not found", tx_name);
			rc = ENOENT;
			goto fail_dp_tx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Tx datapath %s",
				tx_name);
			rc = EINVAL;
			goto fail_dp_tx_caps;
		}
	} else {
		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_tx;
		}
	}

	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
	if (sas->dp_tx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_tx_name;
	}

	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);

	/* Commit the selection to process-private state ... */
	sa->priv.dp_rx = dp_rx;
	sa->priv.dp_tx = dp_tx;

	/* ... and install the fast-path and control-path entry points */
	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;

	dev->rx_queue_count = sfc_rx_queue_count;
	dev->rx_descriptor_status = sfc_rx_descriptor_status;
	dev->tx_descriptor_status = sfc_tx_descriptor_status;
	dev->dev_ops = &sfc_eth_dev_ops;

	return 0;

fail_dp_tx_name:
fail_dp_tx_caps:
fail_dp_tx:
fail_kvarg_tx_datapath:
	/* Tx-stage failure: undo the Rx name allocation made above */
	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;

fail_dp_rx_name:
fail_dp_rx_caps:
fail_dp_rx:
fail_kvarg_rx_datapath:
	return rc;
}
+
+static void
+sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+
+ dev->dev_ops = NULL;
+ dev->tx_pkt_prepare = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(sas->dp_tx_name);
+ sas->dp_tx_name = NULL;
+ sa->priv.dp_tx = NULL;
+
+ rte_free(sas->dp_rx_name);
+ sas->dp_rx_name = NULL;
+ sa->priv.dp_rx = NULL;
+}
+
/*
 * Reduced set of ethdev operations exposed in secondary processes:
 * read-only state queries that do not require the primary process's
 * control path.
 */
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
};
+
+static int
+sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
+{
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ struct sfc_adapter_priv *sap;
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+ int rc;
+
+ /*
+ * Allocate process private data from heap, since it should not
+ * be located in shared memory allocated using rte_malloc() API.
+ */
+ sap = calloc(1, sizeof(*sap));
+ if (sap == NULL) {
+ rc = ENOMEM;
+ goto fail_alloc_priv;
+ }
+
+ sap->logtype_main = logtype_main;
+
+ dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
+ if (dp_rx == NULL) {
+ SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+ "cannot find %s Rx datapath", sas->dp_rx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
+ SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+ "%s Rx datapath does not support multi-process",
+ sas->dp_rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx_multi_process;
+ }
+
+ dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
+ if (dp_tx == NULL) {
+ SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+ "cannot find %s Tx datapath", sas->dp_tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
+ SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+ "%s Tx datapath does not support multi-process",
+ sas->dp_tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx_multi_process;
+ }
+
+ sap->dp_rx = dp_rx;
+ sap->dp_tx = dp_tx;
+
+ dev->process_private = sap;
+ dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_prepare = dp_tx->pkt_prepare;
+ dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->rx_queue_count = sfc_rx_queue_count;
+ dev->rx_descriptor_status = sfc_rx_descriptor_status;
+ dev->tx_descriptor_status = sfc_tx_descriptor_status;
+ dev->dev_ops = &sfc_eth_dev_secondary_ops;
+
+ return 0;
+
+fail_dp_tx_multi_process:
+fail_dp_tx:
+fail_dp_rx_multi_process:
+fail_dp_rx:
+ free(sap);
+
+fail_alloc_priv:
+ return rc;
+}
+
+static void
+sfc_register_dp(void)
+{
+ /* Register once */
+ if (TAILQ_EMPTY(&sfc_dp_head)) {
+ /* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+
+ sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
+ }
+}
+
+static int
+sfc_parse_switch_mode(struct sfc_adapter *sa, bool has_representors)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const char *switch_mode = NULL;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_SWITCH_MODE,
+ sfc_kvarg_string_handler, &switch_mode);
+ if (rc != 0)
+ goto fail_kvargs;
+
+ if (switch_mode == NULL) {
+ sa->switchdev = encp->enc_mae_supported &&
+ (!encp->enc_datapath_cap_evb ||
+ has_representors);
+ } else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
+ sa->switchdev = false;
+ } else if (strcasecmp(switch_mode,
+ SFC_KVARG_SWITCH_MODE_SWITCHDEV) == 0) {
+ sa->switchdev = true;
+ } else {
+ sfc_err(sa, "invalid switch mode device argument '%s'",
+ switch_mode);
+ rc = EINVAL;
+ goto fail_mode;
+ }
+
+ sfc_log_init(sa, "done");
+
+ return 0;
+
+fail_mode:
+fail_kvargs:
+ sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+ return rc;
+}
+
/*
 * ethdev init entry point for the net driver.
 *
 * Dispatches secondary processes to sfc_eth_dev_secondary_init(); in the
 * primary process it sets up logging, allocates process-private adapter
 * state, parses device arguments and probes the NIC.
 * NOTE(review): this function continues beyond the end of this hunk;
 * the remaining error-handling labels are not visible here.
 */
static int
sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct sfc_ethdev_init_data *init_data = init_params;
	uint32_t logtype_main;
	struct sfc_adapter *sa;
	int rc;
	const efx_nic_cfg_t *encp;
	const struct rte_ether_addr *from;
	int ret;

	/*
	 * Return 1 (not an error) when the device belongs to another sfc
	 * driver class, so that this probe is skipped.
	 */
	if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
			SFC_EFX_DEV_CLASS_NET) {
		SFC_GENERIC_LOG(DEBUG,
			"Incompatible device class: skip probing, should be probed by other sfc driver.");
		return 1;
	}

	rc = sfc_dp_mport_register();
	if (rc != 0)
		return rc;

	sfc_register_dp();

	logtype_main = sfc_register_logtype(&pci_dev->addr,
					    SFC_LOGTYPE_MAIN_STR,
					    RTE_LOG_NOTICE);

	/* Secondary processes only attach to already-selected datapaths */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -sfc_eth_dev_secondary_init(dev, logtype_main);

	/* Required for logging */
	ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
		       "PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
		       pci_dev->addr.domain, pci_dev->addr.bus,
		       pci_dev->addr.devid, pci_dev->addr.function,
		       dev->data->port_id);
	if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
		/* Truncated or failed snprintf: refuse rather than log badly */
		SFC_GENERIC_LOG(ERR,
			"reserved log prefix is too short for " PCI_PRI_FMT,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		return -EINVAL;
	}
	sas->pci_addr = pci_dev->addr;
	sas->port_id = dev->data->port_id;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sa = calloc(1, sizeof(*sa));
	if (sa == NULL) {
		rc = ENOMEM;
		goto fail_alloc_sa;
	}

	dev->process_private = sa;

	/* Required for logging */
	sa->priv.shared = sas;
	sa->priv.logtype_main = logtype_main;

	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "probing");
	rc = sfc_probe(sa);
	if (rc != 0)
		goto fail_probe;

	/*
	 * Selecting a default switch mode requires the NIC to be probed and
	 * to have its capabilities filled in.
	 */
	rc = sfc_parse_switch_mode(sa, init_data->nb_representors > 0);
	if (rc != 0)
		goto fail_switch_mode;

	sfc_log_init(sa, "set device ops");
	rc = sfc_eth_dev_set_ops(dev);
	if (rc != 0)
		goto fail_set_ops;