ark_ddm_verify(struct ark_ddm_t *ddm)
{
uint32_t hw_const;
+ uint32_t hw_ver;
if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
ARK_PMD_LOG(ERR, "DDM structure looks incorrect %d vs %zd\n",
ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t));
return -1;
}
- hw_const = ddm->cfg.const0;
- if (hw_const == ARK_DDM_CONST3)
+ hw_const = ddm->cfg.idnum;
+ hw_ver = ddm->cfg.vernum;
+ if (hw_const == ARK_DDM_MODID && hw_ver == ARK_DDM_MODVER)
return 0;
- if (hw_const == ARK_DDM_CONST1) {
- ARK_PMD_LOG(ERR,
- "ARK: DDM module is version 1, "
- "PMD expects version 2\n");
- return -1;
- }
-
- if (hw_const == ARK_DDM_CONST2) {
- ARK_PMD_LOG(ERR,
- "ARK: DDM module is version 2, "
- "PMD expects version 3\n");
- return -1;
- }
ARK_PMD_LOG(ERR,
- "ARK: DDM module not found as expected 0x%08x\n",
- ddm->cfg.const0);
+ "ARK: DDM module not found as expected"
+ " id: %08x ver: %08x\n",
+ hw_const, hw_ver);
return -1;
}
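
A side note for reviewers: the two new constants are ASCII tags when read byte-wise. A minimal standalone sketch, assuming a little-endian host; the values are copied from the defines below, everything else is hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t modid = 0x204d4444;	/* ARK_DDM_MODID */
	uint32_t modver = 0x37313232;	/* ARK_DDM_MODVER */
	char buf[5] = { 0 };

	memcpy(buf, &modid, 4);		/* bytes 44 44 4d 20 -> "DDM " */
	printf("id  = \"%s\"\n", buf);
	memcpy(buf, &modver, 4);	/* bytes 32 32 31 37 -> "2217" */
	printf("ver = \"%s\"\n", buf);
	return 0;
}
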
void
-ark_ddm_start(struct ark_ddm_t *ddm)
-{
- ddm->cfg.command = 1;
-}
-
-int
-ark_ddm_stop(struct ark_ddm_t *ddm, const int wait)
+ark_ddm_queue_enable(struct ark_ddm_t *ddm, int enable)
{
- int cnt = 0;
-
- ddm->cfg.command = 2;
- rte_wmb();
- while (wait && (ddm->cfg.stop_flushed & 0x01) == 0) {
- if (cnt++ > 1000)
- return 1;
-
- usleep(10);
- }
- return 0;
+ ddm->setup.qcommand = enable ? 1U : 0U;
}
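
The qcommand field written here is declared volatile in the setup block. A minimal sketch of why that matters for an MMIO doorbell, with all names hypothetical:

#include <stdint.h>

/* volatile forces the compiler to emit every store; without it the
 * enable/disable writes below could be coalesced or elided. */
struct demo_setup_t {
	volatile uint32_t qcommand;
};

static void
demo_queue_enable(struct demo_setup_t *setup, int enable)
{
	setup->qcommand = enable ? 1U : 0U;	/* one 32-bit store to HW */
}

int main(void)
{
	static struct demo_setup_t setup;	/* stands in for a BAR mapping */

	demo_queue_enable(&setup, 1);
	demo_queue_enable(&setup, 0);
	return 0;
}
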
void
-ark_ddm_reset(struct ark_ddm_t *ddm)
-{
- int status;
-
- /* reset only works if ddm has stopped properly. */
- status = ark_ddm_stop(ddm, 1);
-
- if (status != 0) {
- ARK_PMD_LOG(NOTICE, "%s stop failed doing forced reset\n",
- __func__);
- ddm->cfg.command = 4;
- usleep(10);
- }
- ddm->cfg.command = 3;
-}
-
-void
-ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr, uint32_t interval)
+ark_ddm_queue_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr)
{
ddm->setup.cons_write_index_addr = cons_addr;
- ddm->setup.write_index_interval = interval / 4; /* 4 ns period */
+ ddm->setup.cons_index = 0;
}
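
cons_write_index_addr gives the DDM a host address to DMA its consumer index into, so the driver polls a cached host word rather than reading a device register. A hedged sketch of the polling side, with hypothetical names:

#include <stdint.h>

/* Hardware advances the consumer index in host memory; unsigned
 * subtraction keeps the completed-descriptor delta correct across
 * 32-bit wraparound. */
static inline uint32_t
demo_completed_since(const volatile uint32_t *cons_index_host,
		     uint32_t last_seen)
{
	return *cons_index_host - last_seen;
}
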
+/* Global stats clear */
void
ark_ddm_stats_reset(struct ark_ddm_t *ddm)
{
ddm->cfg.tlp_stats_clear = 1;
}
-void
-ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
-{
- ARK_PMD_LOG(DEBUG, "%s Stopped: %d\n", msg,
- ark_ddm_is_stopped(ddm)
- );
-}
-
void
ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
{
"MBufs", stats->tx_mbuf_count);
}
-int
-ark_ddm_is_stopped(struct ark_ddm_t *ddm)
-{
- return (ddm->cfg.stop_flushed & 0x01) != 0;
-}
-
uint64_t
ark_ddm_queue_byte_count(struct ark_ddm_t *ddm)
{
*/
#define ARK_DDM_CFG 0x0000
/* Set unique HW ID for hardware version */
-#define ARK_DDM_CONST3 (0x334d4444)
-#define ARK_DDM_CONST2 (0x324d4444)
-#define ARK_DDM_CONST1 (0xfacecafe)
+#define ARK_DDM_MODID 0x204d4444
+#define ARK_DDM_MODVER 0x37313232
struct ark_ddm_cfg_t {
+ union {
+ char id[4];
+ uint32_t idnum;
+ };
+ union {
+ char ver[4];
+ uint32_t vernum;
+ volatile uint32_t tlp_stats_clear;
+ };
uint32_t r0;
- volatile uint32_t tlp_stats_clear;
- uint32_t const0;
volatile uint32_t tag_max;
volatile uint32_t command;
- volatile uint32_t stop_flushed;
+ uint32_t write_index_interval; /* 4ns each */
+ volatile uint64_t qflow;
};
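
The second anonymous union above aliases the read-only version word with the write-side stats-clear strobe at the same 32-bit offset. A minimal standalone sketch of the idiom (hypothetical names, C11 anonymous unions):

#include <stdint.h>
#include <stdio.h>

struct demo_cfg_t {
	union {
		char ver[4];			/* byte view of the word */
		uint32_t vernum;		/* read: version number */
		volatile uint32_t stats_clear;	/* write: clear strobe */
	};
};

int main(void)
{
	struct demo_cfg_t cfg = { .vernum = 0x37313232 };

	printf("ver word: 0x%08x\n", cfg.vernum);
	cfg.stats_clear = 1;	/* on real hardware this write is the strobe */
	return 0;
}
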
#define ARK_DDM_STATS 0x0020
volatile uint64_t tx_mbuf_count;
};
-#define ARK_DDM_MRDQ 0x0040
-struct ark_ddm_mrdq_t {
- volatile uint32_t mrd_q1;
- volatile uint32_t mrd_q2;
- volatile uint32_t mrd_q3;
- volatile uint32_t mrd_q4;
- volatile uint32_t mrd_full;
-};
-
-#define ARK_DDM_CPLDQ 0x0068
-struct ark_ddm_cpldq_t {
- volatile uint32_t cpld_q1;
- volatile uint32_t cpld_q2;
- volatile uint32_t cpld_q3;
- volatile uint32_t cpld_q4;
- volatile uint32_t cpld_full;
-};
-
-#define ARK_DDM_MRD_PS 0x0090
-struct ark_ddm_mrd_ps_t {
- volatile uint32_t mrd_ps_min;
- volatile uint32_t mrd_ps_max;
- volatile uint32_t mrd_full_ps_min;
- volatile uint32_t mrd_full_ps_max;
- volatile uint32_t mrd_dw_ps_min;
- volatile uint32_t mrd_dw_ps_max;
-};
-
#define ARK_DDM_QUEUE_STATS 0x00a8
struct ark_ddm_qstats_t {
volatile uint64_t byte_count;
volatile uint64_t mbuf_count;
};
-#define ARK_DDM_CPLD_PS 0x00c0
-struct ark_ddm_cpld_ps_t {
- volatile uint32_t cpld_ps_min;
- volatile uint32_t cpld_ps_max;
- volatile uint32_t cpld_full_ps_min;
- volatile uint32_t cpld_full_ps_max;
- volatile uint32_t cpld_dw_ps_min;
- volatile uint32_t cpld_dw_ps_max;
-};
-
#define ARK_DDM_SETUP 0x00e0
struct ark_ddm_setup_t {
rte_iova_t cons_write_index_addr;
- uint32_t write_index_interval; /* 4ns each */
+ volatile uint32_t qcommand;
volatile uint32_t cons_index;
};
struct ark_ddm_cfg_t cfg;
uint8_t reserved0[(ARK_DDM_STATS - ARK_DDM_CFG) -
sizeof(struct ark_ddm_cfg_t)];
+
struct ark_ddm_stats_t stats;
- uint8_t reserved1[(ARK_DDM_MRDQ - ARK_DDM_STATS) -
+ uint8_t reserved1[(ARK_DDM_QUEUE_STATS - ARK_DDM_STATS) -
sizeof(struct ark_ddm_stats_t)];
- struct ark_ddm_mrdq_t mrdq;
- uint8_t reserved2[(ARK_DDM_CPLDQ - ARK_DDM_MRDQ) -
- sizeof(struct ark_ddm_mrdq_t)];
- struct ark_ddm_cpldq_t cpldq;
- uint8_t reserved3[(ARK_DDM_MRD_PS - ARK_DDM_CPLDQ) -
- sizeof(struct ark_ddm_cpldq_t)];
- struct ark_ddm_mrd_ps_t mrd_ps;
+
struct ark_ddm_qstats_t queue_stats;
- struct ark_ddm_cpld_ps_t cpld_ps;
- uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_CPLD_PS) -
- sizeof(struct ark_ddm_cpld_ps_t)];
+ uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_QUEUE_STATS) -
+ sizeof(struct ark_ddm_qstats_t)];
+
struct ark_ddm_setup_t setup;
uint8_t reserved_p[(ARK_DDM_EXPECTED_SIZE - ARK_DDM_SETUP) -
sizeof(struct ark_ddm_setup_t)];
};
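
The reserved arrays pad each register block out to its #define'd offset, so a layout mistake surfaces as a negative array size at compile time. A compile-time check of the same idiom on a hypothetical two-block map (DPDK proper could use RTE_BUILD_BUG_ON() to the same effect):

#include <stdint.h>
#include <stddef.h>

#define DEMO_BLK_A 0x0000
#define DEMO_BLK_B 0x0020

struct demo_a_t { uint32_t x; uint32_t y; };
struct demo_b_t { uint64_t count; };

struct demo_map_t {
	struct demo_a_t a;
	uint8_t reserved0[(DEMO_BLK_B - DEMO_BLK_A) -
			  sizeof(struct demo_a_t)];
	struct demo_b_t b;
};

_Static_assert(offsetof(struct demo_map_t, b) == DEMO_BLK_B,
	       "block B must land on its documented offset");
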
-
/* DDM function prototype */
int ark_ddm_verify(struct ark_ddm_t *ddm);
-void ark_ddm_start(struct ark_ddm_t *ddm);
-int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
-void ark_ddm_reset(struct ark_ddm_t *ddm);
void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
-void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
- uint32_t interval);
+void ark_ddm_queue_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr);
void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
-void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
-int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm);
void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm);
+void ark_ddm_queue_enable(struct ark_ddm_t *ddm, int enable);
#endif
mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
}
- /* TX -- DDM */
- if (ark_ddm_stop(ark->ddm.v, 1))
- ARK_PMD_LOG(ERR, "Unable to stop DDM\n");
-
mpu = ark->mputx.v;
num_q = ark_api_num_queues(mpu);
ark->tx_queues = num_q;
mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
}
- ark_ddm_reset(ark->ddm.v);
- ark_ddm_stats_reset(ark->ddm.v);
-
- ark_ddm_stop(ark->ddm.v, 0);
if (ark->rqpacing)
ark_rqp_stats_reset(ark->rqpacing);
for (i = 0; i < dev->data->nb_tx_queues; i++)
eth_ark_tx_queue_start(dev, i);
- /* start DDM */
- ark_ddm_start(ark->ddm.v);
-
ark->started = 1;
/* set xmit and receive function */
dev->rx_pkt_burst = &eth_ark_recv_pkts;
}
}
- /* Stop DDM */
- /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */
- for (i = 0; i < 10; i++) {
- status = ark_ddm_stop(ark->ddm.v, 1);
- if (status == 0)
- break;
- }
- if (status || i != 0) {
- ARK_PMD_LOG(ERR, "DDM stop anomaly. status:"
- " %d iter: %u. (%s)\n",
- status,
- i,
- __func__);
- ark_ddm_dump(ark->ddm.v, "Stop anomaly");
-
- mpu = ark->mputx.v;
- for (i = 0; i < ark->tx_queues; i++) {
- ark_mpu_dump(mpu, "DDM failure dump", i);
- mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
- }
- }
-
/* STOP RX Side */
/* Stop UDM multiple tries attempted */
for (i = 0; i < 10; i++) {
static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
static void free_completed_tx(struct ark_tx_queue *queue);
-static inline void
-ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
-{
- ark_mpu_stop(queue->mpu);
-}
-
/* ************************************************************************* */
static inline void
eth_ark_tx_desc_fill(struct ark_tx_queue *queue,
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
rte_iova_t queue_base, ring_base, cons_index_addr;
- uint32_t write_interval_ns;
/* Verify HW -- MPU */
if (ark_mpu_verify(queue->mpu, sizeof(union ark_tx_meta)))
/* Stop and Reset and configure MPU */
ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);
- /*
- * Adjust the write interval based on queue size --
- * increase pcie traffic when low mbuf count
- * Queue sizes less than 128 are not allowed
- */
- switch (queue->queue_size) {
- case 128:
- write_interval_ns = 500;
- break;
- case 256:
- write_interval_ns = 500;
- break;
- case 512:
- write_interval_ns = 1000;
- break;
- default:
- write_interval_ns = 2000;
- break;
- }
-
/* Completion address in UDM */
- ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);
+ ark_ddm_queue_setup(queue->ddm, cons_index_addr);
+ ark_ddm_queue_reset_stats(queue->ddm);
return 0;
}
queue = (struct ark_tx_queue *)vtx_queue;
- ark_tx_hw_queue_stop(queue);
+ ark_ddm_queue_enable(queue->ddm, 0);
+ ark_mpu_stop(queue->mpu);
queue->cons_index = queue->prod_index;
free_completed_tx(queue);
return -1;
}
+ ark_ddm_queue_enable(queue->ddm, 0);
ark_mpu_stop(queue->mpu);
free_completed_tx(queue);
return 0;
ark_mpu_start(queue->mpu);
+ ark_ddm_queue_enable(queue->ddm, 1);
dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
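
Note the ordering the two hunks above establish: start brings the MPU up before enabling the DDM queue, and stop disables the DDM queue before stopping the MPU. A toy sketch of that bracket, with stand-in types; real code uses struct ark_tx_queue, ark_mpu_start()/ark_mpu_stop() and ark_ddm_queue_enable() as shown above:

#include <stdint.h>

struct demo_q {
	volatile uint32_t mpu_run;	/* stand-in for the MPU */
	volatile uint32_t ddm_run;	/* stand-in for the DDM queue */
};

static void demo_start(struct demo_q *q)
{
	q->mpu_run = 1;		/* MPU up first, as in the hunk above... */
	q->ddm_run = 1;		/* ...then enable the DDM queue */
}

static void demo_stop(struct demo_q *q)
{
	q->ddm_run = 0;		/* quiesce the DDM queue first... */
	q->mpu_run = 0;		/* ...then stop the MPU behind it */
}

int main(void)
{
	struct demo_q q = { 0, 0 };

	demo_start(&q);
	demo_stop(&q);
	return 0;
}
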