if (ark_ddm_verify(ark->ddm.v))
return -1;
- /* UDM */
- if (ark_udm_reset(ark->udm.v)) {
- ARK_PMD_LOG(ERR, "Unable to stop and reset UDM\n");
- return -1;
- }
- /* Keep in reset until the MPU are cleared */
-
/* MPU reset */
mpu = ark->mpurx.v;
num_q = ark_api_num_queues(mpu);
int i;
/* RX Side */
- /* start UDM */
- ark_udm_start(ark->udm.v);
-
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_ark_rx_start_queue(dev, i);
uint16_t i;
int status;
struct ark_adapter *ark = dev->data->dev_private;
- struct ark_mpu_t *mpu;
if (ark->started == 0)
return 0;
dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+ /* STOP RX Side */
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_stop_queue(dev, i);
+
/* STOP TX Side */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
status = eth_ark_tx_queue_stop(dev, i);
}
}
- /* STOP RX Side */
- /* Stop UDM multiple tries attempted */
- for (i = 0; i < 10; i++) {
- status = ark_udm_stop(ark->udm.v, 1);
- if (status == 0)
- break;
- }
- if (status || i != 0) {
- ARK_PMD_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
- status, i, __func__);
- ark_udm_dump(ark->udm.v, "Stop anomaly");
-
- mpu = ark->mpurx.v;
- for (i = 0; i < ark->rx_queues; i++) {
- ark_mpu_dump(mpu, "UDM Stop anomaly", i);
- mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
- }
- }
-
ark_udm_dump_stats(ark->udm.v, "Post stop");
- ark_udm_dump_perf(ark->udm.v, "Post stop");
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_ark_rx_dump_queue(dev, i, __func__);
ark->user_data[dev->data->port_id]);
eth_ark_dev_stop(dev);
- eth_ark_udm_force_close(dev);
/*
- * TODO This should only be called once for the device during shutdown
+ * This should only be called once for the device during shutdown
*/
if (ark->rqpacing)
ark_rqp_dump(ark->rqpacing);
queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
/* Configure UDM per queue */
- ark_udm_stop(queue->udm, 0);
ark_udm_configure(queue->udm,
RTE_PKTMBUF_HEADROOM,
- queue->dataroom,
- ARK_RX_WRITE_TIME_NS);
- ark_udm_stats_reset(queue->udm);
- ark_udm_stop(queue->udm, 0);
+ queue->dataroom);
+ ark_udm_queue_stats_reset(queue->udm);
/* populate mbuf reserve */
status = eth_ark_rx_seed_mbufs(queue);
ark_udm_queue_stats_reset(queue->udm);
}
-void
-eth_ark_udm_force_close(struct rte_eth_dev *dev)
-{
- struct ark_adapter *ark = dev->data->dev_private;
- struct ark_rx_queue *queue;
- uint32_t index;
- uint16_t i;
-
- if (!ark_udm_is_flushed(ark->udm.v)) {
- /* restart the MPUs */
- ARK_PMD_LOG(NOTICE, "UDM not flushed -- forcing flush\n");
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
- if (queue == 0)
- continue;
-
- ark_mpu_start(queue->mpu);
- /* Add some buffers */
- index = ARK_RX_MPU_CHUNK + queue->seed_index;
- ark_mpu_set_producer(queue->mpu, index);
- }
- /* Wait to allow data to pass */
- usleep(100);
-
- ARK_PMD_LOG(NOTICE, "UDM forced flush attempt, stopped = %d\n",
- ark_udm_is_flushed(ark->udm.v));
- }
- ark_udm_reset(ark->udm.v);
-}
-
static void
ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
{
ark_mpu_dump(queue->mpu, name, queue->phys_qid);
ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
- ark_udm_dump(queue->udm, name);
ark_udm_dump_setup(queue->udm, queue->phys_qid);
}
void eth_rx_queue_stats_reset(void *vqueue);
void eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
const char *msg);
-void eth_ark_udm_force_close(struct rte_eth_dev *dev);
#endif
int
ark_udm_verify(struct ark_udm_t *udm)
{
+ uint32_t idnum = udm->setup.idnum;
+ uint32_t vernum = udm->setup.vernum;
if (sizeof(struct ark_udm_t) != ARK_UDM_EXPECT_SIZE) {
ARK_PMD_LOG(ERR,
"ARK: UDM structure looks incorrect %d vs %zd\n",
return -1;
}
- if (udm->setup.const0 != ARK_UDM_CONST) {
+ if (idnum != ARK_UDM_MODID || vernum != ARK_UDM_MODVER) {
ARK_PMD_LOG(ERR,
- "ARK: UDM module not found as expected 0x%08x\n",
- udm->setup.const0);
+ "ARK: UDM module not found as expected 0x%08x 0x%08x\n",
+ idnum, vernum);
return -1;
}
return 0;
}
-int
-ark_udm_stop(struct ark_udm_t *udm, const int wait)
-{
- int cnt = 0;
-
- udm->setup.r0 = 0;
- udm->cfg.command = 2;
- rte_wmb();
-
- while (wait && (udm->cfg.stop_flushed & 0x01) == 0) {
- if (cnt++ > 1000)
- return 1;
-
- usleep(10);
- }
- return 0;
-}
-
-int
-ark_udm_reset(struct ark_udm_t *udm)
-{
- int status;
-
- status = ark_udm_stop(udm, 1);
- if (status != 0) {
- ARK_PMD_LOG(NOTICE, "%s stop failed doing forced reset\n",
- __func__);
- udm->cfg.command = 4;
- usleep(10);
- udm->cfg.command = 3;
- status = ark_udm_stop(udm, 0);
- ARK_PMD_LOG(INFO, "%s stop status %d post failure"
- " and forced reset\n",
- __func__, status);
- } else {
- udm->cfg.command = 3;
- }
-
- return status;
-}
-
-void
-ark_udm_start(struct ark_udm_t *udm)
-{
- udm->setup.r0 = 0x100;
- udm->cfg.command = 1;
-}
-
-void
-ark_udm_stats_reset(struct ark_udm_t *udm)
-{
- udm->pcibp.pci_clear = 1;
- udm->tlp_ps.tlp_clear = 1;
-}
-
void
ark_udm_configure(struct ark_udm_t *udm,
uint32_t headroom,
- uint32_t dataroom,
- uint32_t write_interval_ns)
+ uint32_t dataroom)
{
/* headroom and data room are in DWords in the UDM */
udm->cfg.dataroom = dataroom / 4;
udm->cfg.headroom = headroom / 4;
-
- /* 4 NS period ns */
- udm->rt_cfg.write_interval = write_interval_ns / 4;
}
void
ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr)
{
udm->rt_cfg.hw_prod_addr = addr;
-}
-
-int
-ark_udm_is_flushed(struct ark_udm_t *udm)
-{
- return (udm->cfg.stop_flushed & 0x01) != 0;
+ udm->rt_cfg.prod_idx = 0;
}
uint64_t
ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
{
ARK_PMD_LOG(INFO, "UDM Stats: %s"
- ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 "\n",
msg,
"Pkts Received", udm->stats.rx_packet_count,
"Pkts Finalized", udm->stats.rx_sent_packets,
- "Pkts Dropped", udm->tlp.pkt_drop,
"Bytes Count", udm->stats.rx_byte_count,
"MBuf Count", udm->stats.rx_mbuf_count);
}
"MBuf Count", udm->qstats.q_mbuf_count);
}
-void
-ark_udm_dump(struct ark_udm_t *udm, const char *msg)
-{
- ARK_PMD_LOG(DEBUG, "UDM Dump: %s Stopped: %d\n", msg,
- udm->cfg.stop_flushed);
-}
-
void
ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id)
{
"prod_idx", udm->rt_cfg.prod_idx);
}
-void
-ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg)
-{
- struct ark_udm_pcibp_t *bp = &udm->pcibp;
-
- ARK_PMD_LOG(INFO, "UDM Performance %s"
- ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
- "\n",
- msg,
- "PCI Empty", bp->pci_empty,
- "PCI Q1", bp->pci_q1,
- "PCI Q2", bp->pci_q2,
- "PCI Q3", bp->pci_q3,
- "PCI Q4", bp->pci_q4,
- "PCI Full", bp->pci_full);
-}
-
void
ark_udm_queue_stats_reset(struct ark_udm_t *udm)
{
#define ARK_RX_WRITE_TIME_NS 2500
#define ARK_UDM_SETUP 0
-#define ARK_UDM_CONST2 0xbACECACE
-#define ARK_UDM_CONST3 0x344d4455
-#define ARK_UDM_CONST ARK_UDM_CONST3
+#define ARK_UDM_MODID 0x4d445500
+#define ARK_UDM_MODVER 0x37313232
+
struct ark_udm_setup_t {
+ union {
+ char id[4];
+ uint32_t idnum;
+ };
+ union {
+ char ver[4];
+ uint32_t vernum;
+ };
uint32_t r0;
- uint32_t r4;
- volatile uint32_t cycle_count;
uint32_t const0;
};
#define ARK_UDM_CFG 0x010
struct ark_udm_cfg_t {
- volatile uint32_t stop_flushed; /* RO */
+ uint32_t write_interval; /* 4ns cycles */
volatile uint32_t command;
uint32_t dataroom;
uint32_t headroom;
uint32_t q_enable;
};
-#define ARK_UDM_TLP 0x0070
-struct ark_udm_tlp_t {
- volatile uint64_t pkt_drop; /* global */
- volatile uint32_t tlp_q1;
- volatile uint32_t tlp_q2;
- volatile uint32_t tlp_q3;
- volatile uint32_t tlp_q4;
- volatile uint32_t tlp_full;
-};
-
-#define ARK_UDM_PCIBP 0x00a0
-struct ark_udm_pcibp_t {
- volatile uint32_t pci_clear;
- volatile uint32_t pci_empty;
- volatile uint32_t pci_q1;
- volatile uint32_t pci_q2;
- volatile uint32_t pci_q3;
- volatile uint32_t pci_q4;
- volatile uint32_t pci_full;
-};
-
-#define ARK_UDM_TLP_PS 0x00bc
-struct ark_udm_tlp_ps_t {
- volatile uint32_t tlp_clear;
- volatile uint32_t tlp_ps_min;
- volatile uint32_t tlp_ps_max;
- volatile uint32_t tlp_full_ps_min;
- volatile uint32_t tlp_full_ps_max;
- volatile uint32_t tlp_dw_ps_min;
- volatile uint32_t tlp_dw_ps_max;
- volatile uint32_t tlp_pldw_ps_min;
- volatile uint32_t tlp_pldw_ps_max;
-};
-
#define ARK_UDM_RT_CFG 0x00e0
struct ark_udm_rt_cfg_t {
rte_iova_t hw_prod_addr;
- uint32_t write_interval; /* 4ns cycles */
- volatile uint32_t prod_idx; /* RO */
+ uint32_t reserved;
+ volatile uint32_t prod_idx; /* Updated by HW */
};
/* Consolidated structure */
struct ark_udm_cfg_t cfg;
struct ark_udm_stats_t stats;
struct ark_udm_queue_stats_t qstats;
- uint8_t reserved1[(ARK_UDM_TLP - ARK_UDM_PQ) -
+ uint8_t reserved1[(ARK_UDM_RT_CFG - ARK_UDM_PQ) -
sizeof(struct ark_udm_queue_stats_t)];
- struct ark_udm_tlp_t tlp;
- uint8_t reserved2[(ARK_UDM_PCIBP - ARK_UDM_TLP) -
- sizeof(struct ark_udm_tlp_t)];
- struct ark_udm_pcibp_t pcibp;
- struct ark_udm_tlp_ps_t tlp_ps;
struct ark_udm_rt_cfg_t rt_cfg;
int8_t reserved3[(ARK_UDM_EXPECT_SIZE - ARK_UDM_RT_CFG) -
sizeof(struct ark_udm_rt_cfg_t)];
int ark_udm_verify(struct ark_udm_t *udm);
-int ark_udm_stop(struct ark_udm_t *udm, int wait);
-void ark_udm_start(struct ark_udm_t *udm);
-int ark_udm_reset(struct ark_udm_t *udm);
void ark_udm_configure(struct ark_udm_t *udm,
uint32_t headroom,
- uint32_t dataroom,
- uint32_t write_interval_ns);
+ uint32_t dataroom);
void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
-void ark_udm_stats_reset(struct ark_udm_t *udm);
void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
uint16_t qid);
-void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
-void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
-int ark_udm_is_flushed(struct ark_udm_t *udm);
/* Per queue data */
uint64_t ark_udm_dropped(struct ark_udm_t *udm);