NULL
};
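+/* PCI vendor ID shared by all supported Arkville devices */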
+#define AR_VENDOR_ID 0x1d6c
static const struct rte_pci_id pci_id_ark_map[] = {
- {RTE_PCI_DEVICE(0x1d6c, 0x100d)},
- {RTE_PCI_DEVICE(0x1d6c, 0x100e)},
- {RTE_PCI_DEVICE(0x1d6c, 0x100f)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1010)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1017)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1018)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1019)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100d)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100e)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100f)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1010)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1017)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1018)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1019)},
{.vendor_id = 0, /* sentinel */ },
};
+/*
+ * These structures statically define the capabilities of the
+ * supported devices.
+ * Capabilities:
+ *  rqpacing -
+ *   Some HW variants require that PCIe read requests be correctly
+ *   throttled. This is called "rqpacing" and relates to credit and
+ *   flow control on certain Arkville implementations.
+ */
+struct ark_caps {
+ bool rqpacing;
+};
+struct ark_dev_caps {
+ uint32_t device_id;
+ struct ark_caps caps;
+};
+#define SET_DEV_CAPS(id, rqp) \
+ {id, {.rqpacing = rqp} }
+
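+/* Capabilities of each supported device, keyed by PCI device ID */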
+static const struct ark_dev_caps
+ark_device_caps[] = {
+ SET_DEV_CAPS(0x100d, true),
+ SET_DEV_CAPS(0x100e, true),
+ SET_DEV_CAPS(0x100f, true),
+ SET_DEV_CAPS(0x1010, false),
+ SET_DEV_CAPS(0x1017, true),
+ SET_DEV_CAPS(0x1018, true),
+ SET_DEV_CAPS(0x1019, true),
+ {.device_id = 0, /* sentinel */ }
+};
+
static int
eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
int ret;
int port_count = 1;
int p;
+ bool rqpacing = false; /* updated from ark_device_caps below */
ark->eth_dev = dev;
rte_eth_copy_pci_info(dev, pci_dev);
dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
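+ /* Look up this device's capabilities in ark_device_caps */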
+ p = 0;
+ while (ark_device_caps[p].device_id != 0) {
+ if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
+ rqpacing = ark_device_caps[p].caps.rqpacing;
+ break;
+ }
+ p++;
+ }
+
/* Use dummy function until setup */
dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];
- ark->rqpacing =
- (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
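+ /* Map the request-pacing registers only on devices that require rqpacing */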
+ if (rqpacing) {
+ ark->rqpacing =
+ (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
+ } else {
+ ark->rqpacing = NULL;
+ }
ark->started = 0;
ark->pkt_dir_v = ARK_PKT_DIR_INIT_VAL;
return -1;
}
if (ark->sysctrl.t32[3] != 0) {
- if (ark_rqp_lasped(ark->rqpacing)) {
- ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
- "Timer has Expired\n");
- return -1;
+ if (ark->rqpacing) {
+ if (ark_rqp_lasped(ark->rqpacing)) {
+ ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
+ "Timer has Expired\n");
+ return -1;
+ }
+ ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
+ "Timer is Running\n");
}
- ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
- "Timer is Running\n");
}
ARK_PMD_LOG(DEBUG,
ark_ddm_stats_reset(ark->ddm.v);
ark_ddm_stop(ark->ddm.v, 0);
- ark_rqp_stats_reset(ark->rqpacing);
+ if (ark->rqpacing)
+ ark_rqp_stats_reset(ark->rqpacing);
return 0;
}
/*
* TODO This should only be called once for the device during shutdown
*/
- ark_rqp_dump(ark->rqpacing);
+ if (ark->rqpacing)
+ ark_rqp_dump(ark->rqpacing);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
eth_ark_tx_queue_release(dev->data->tx_queues[i]);