NULL
};
+/* PCI vendor ID shared by every Arkville device in the table below. */
+#define AR_VENDOR_ID 0x1d6c
static const struct rte_pci_id pci_id_ark_map[] = {
- {RTE_PCI_DEVICE(0x1d6c, 0x100d)},
- {RTE_PCI_DEVICE(0x1d6c, 0x100e)},
- {RTE_PCI_DEVICE(0x1d6c, 0x100f)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1010)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1017)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1018)},
- {RTE_PCI_DEVICE(0x1d6c, 0x1019)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100d)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100e)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x100f)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1010)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1017)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1018)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1019)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101e)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x101f)},
 {.vendor_id = 0, /* sentinel */ },
};
+/*
+ * This structure is used to statically define the capabilities
+ * of supported devices.
+ * Capabilities:
+ * rqpacing -
+ * Some HW variants require that PCIe read-requests be correctly throttled.
+ * This is called "rqpacing" and has to do with credit and flow control
+ * on certain Arkville implementations.
+ */
+struct ark_caps {
+ bool rqpacing;
+};
+/* Per-device capability record, keyed by PCI device id. */
+struct ark_dev_caps {
+ uint32_t device_id;
+ struct ark_caps caps;
+};
+/* Shorthand for one entry of the capability table below. */
+#define SET_DEV_CAPS(id, rqp) \
+ {id, {.rqpacing = rqp} }
+
+/*
+ * Static capability table; ids should mirror pci_id_ark_map above.
+ * A device id not found here keeps the probe-time default
+ * (rqpacing == false).
+ */
+static const struct ark_dev_caps
+ark_device_caps[] = {
+ SET_DEV_CAPS(0x100d, true),
+ SET_DEV_CAPS(0x100e, true),
+ SET_DEV_CAPS(0x100f, true),
+ SET_DEV_CAPS(0x1010, false),
+ SET_DEV_CAPS(0x1017, true),
+ SET_DEV_CAPS(0x1018, true),
+ SET_DEV_CAPS(0x1019, true),
+ SET_DEV_CAPS(0x101e, false),
+ SET_DEV_CAPS(0x101f, false),
+ /* Terminator: device_id 0 ends the lookup loop in probe. */
+ {.device_id = 0,}
+};
+
static int
eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
int ret;
int port_count = 1;
int p;
+ bool rqpacing = false;
ark->eth_dev = dev;
rte_eth_copy_pci_info(dev, pci_dev);
dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+ p = 0;
+ while (ark_device_caps[p].device_id != 0) {
+ if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
+ rqpacing = ark_device_caps[p].caps.rqpacing;
+ break;
+ }
+ p++;
+ }
+
/* Use dummy function until setup */
- dev->rx_pkt_burst = ð_ark_recv_pkts_noop;
- dev->tx_pkt_burst = ð_ark_xmit_pkts_noop;
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];
- ark->rqpacing =
- (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
+ if (rqpacing) {
+ ark->rqpacing =
+ (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
+ } else {
+ ark->rqpacing = NULL;
+ }
ark->started = 0;
ark->pkt_dir_v = ARK_PKT_DIR_INIT_VAL;
return -1;
}
if (ark->sysctrl.t32[3] != 0) {
- if (ark_rqp_lasped(ark->rqpacing)) {
- ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
- "Timer has Expired\n");
- return -1;
+ if (ark->rqpacing) {
+ if (ark_rqp_lasped(ark->rqpacing)) {
+ ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
+ "Timer has Expired\n");
+ return -1;
+ }
+ ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
+ "Timer is Running\n");
}
- ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
- "Timer is Running\n");
}
ARK_PMD_LOG(DEBUG,
* known state
*/
ark->start_pg = 0;
+ ark->pg_running = 0;
ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
if (ark->pg == NULL)
return -1;
num_q = ark_api_num_queues(mpu);
ark->rx_queues = num_q;
for (i = 0; i < num_q; i++) {
- ark_mpu_reset(mpu);
mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
}
- ark_udm_stop(ark->udm.v, 0);
- ark_udm_configure(ark->udm.v,
- RTE_PKTMBUF_HEADROOM,
- RTE_MBUF_DEFAULT_DATAROOM,
- ARK_RX_WRITE_TIME_NS);
- ark_udm_stats_reset(ark->udm.v);
- ark_udm_stop(ark->udm.v, 0);
-
- /* TX -- DDM */
- if (ark_ddm_stop(ark->ddm.v, 1))
- ARK_PMD_LOG(ERR, "Unable to stop DDM\n");
-
mpu = ark->mputx.v;
num_q = ark_api_num_queues(mpu);
ark->tx_queues = num_q;
for (i = 0; i < num_q; i++) {
- ark_mpu_reset(mpu);
mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
}
- ark_ddm_reset(ark->ddm.v);
- ark_ddm_stats_reset(ark->ddm.v);
-
- ark_ddm_stop(ark->ddm.v, 0);
- ark_rqp_stats_reset(ark->rqpacing);
+ if (ark->rqpacing)
+ ark_rqp_stats_reset(ark->rqpacing);
return 0;
}
return 0;
}
+/*
+ * NOTE(review): the blind-sleep packet-generator starter thread appears to
+ * have moved into ark_pktgen_delay_start (used as the rte_ctrl_thread_create
+ * entry point in eth_ark_dev_start) — confirm before relying on it here.
+ */
-static void *
-delay_pg_start(void *arg)
-{
- struct ark_adapter *ark = (struct ark_adapter *)arg;
-
- /* This function is used exclusively for regression testing, We
- * perform a blind sleep here to ensure that the external test
- * application has time to setup the test before we generate packets
- */
- usleep(100000);
- ark_pktgen_run(ark->pg);
- return NULL;
-}
-
-
static int
eth_ark_dev_start(struct rte_eth_dev *dev)
{
for (i = 0; i < dev->data->nb_tx_queues; i++)
eth_ark_tx_queue_start(dev, i);
- /* start DDM */
- ark_ddm_start(ark->ddm.v);
-
ark->started = 1;
/* set xmit and receive function */
dev->rx_pkt_burst = ð_ark_recv_pkts;
if (ark->start_pg)
ark_pktchkr_run(ark->pc);
- if (ark->start_pg && (dev->data->port_id == 0)) {
+ if (ark->start_pg && !ark->pg_running) {
pthread_t thread;
/* Delay packet generatpr start allow the hardware to be ready
* This is only used for sanity checking with internal generator
*/
- if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
+ char tname[32];
+ snprintf(tname, sizeof(tname), "ark-delay-pg-%d",
+ dev->data->port_id);
+
+ if (rte_ctrl_thread_create(&thread, tname, NULL,
+ ark_pktgen_delay_start, ark->pg)) {
ARK_PMD_LOG(ERR, "Could not create pktgen "
"starter thread\n");
return -1;
}
+ ark->pg_running = 1;
}
if (ark->user_ext.dev_start)
ark->user_data[dev->data->port_id]);
/* Stop the packet generator */
- if (ark->start_pg)
+ if (ark->start_pg && ark->pg_running) {
ark_pktgen_pause(ark->pg);
+ ark->pg_running = 0;
+ }
- dev->rx_pkt_burst = ð_ark_recv_pkts_noop;
- dev->tx_pkt_burst = ð_ark_xmit_pkts_noop;
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
/* STOP TX Side */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
}
}
- /* Stop DDM */
- /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */
- for (i = 0; i < 10; i++) {
- status = ark_ddm_stop(ark->ddm.v, 1);
- if (status == 0)
- break;
- }
- if (status || i != 0) {
- ARK_PMD_LOG(ERR, "DDM stop anomaly. status:"
- " %d iter: %u. (%s)\n",
- status,
- i,
- __func__);
- ark_ddm_dump(ark->ddm.v, "Stop anomaly");
-
- mpu = ark->mputx.v;
- for (i = 0; i < ark->tx_queues; i++) {
- ark_mpu_dump(mpu, "DDM failure dump", i);
- mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
- }
- }
-
/* STOP RX Side */
/* Stop UDM multiple tries attempted */
for (i = 0; i < 10; i++) {
/*
* TODO This should only be called once for the device during shutdown
*/
- ark_rqp_dump(ark->rqpacing);
+ if (ark->rqpacing)
+ ark_rqp_dump(ark->rqpacing);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
eth_ark_tx_queue_release(dev->data->tx_queues[i]);
.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
/* ARK PMD supports all line rates, how do we indicate that here ?? */
- dev_info->speed_capa = (ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_50G |
- ETH_LINK_SPEED_100G);
+ dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_50G |
+ RTE_ETH_LINK_SPEED_100G);
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return 0;
}
ARK_PKTGEN_ARG "=<filename> "
ARK_PKTCHKR_ARG "=<filename> "
ARK_PKTDIR_ARG "=<bitmap>");
-RTE_LOG_REGISTER(ark_logtype, pmd.net.ark, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(ark_logtype, NOTICE);