lio_droq_reset_indices(droq);
}
-static void *
-lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
-{
- struct lio_droq *droq = lio_dev->droq[q_no];
- struct rte_mempool *mpool = droq->mpool;
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mpool);
- if (m == NULL) {
- lio_dev_err(lio_dev, "Cannot allocate\n");
- return NULL;
- }
-
- rte_mbuf_refcnt_set(m, 1);
- m->next = NULL;
- m->data_off = RTE_PKTMBUF_HEADROOM;
- m->nb_segs = 1;
- m->pool = mpool;
-
- return m;
-}
-
static int
lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
struct lio_droq *droq)
void *buf;
for (i = 0; i < droq->max_count; i++) {
- buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ buf = rte_pktmbuf_alloc(droq->mpool);
if (buf == NULL) {
lio_dev_err(lio_dev, "buffer alloc failed\n");
+ droq->stats.rx_alloc_failure++;
lio_droq_destroy_ring_buffers(droq);
return -ENOMEM;
}
if (droq->info_mz == NULL)
return NULL;
- droq->info_list_dma = droq->info_mz->phys_addr;
+ droq->info_list_dma = droq->info_mz->iova;
droq->info_alloc_size = droq->info_mz->len;
droq->info_base_addr = (size_t)droq->info_mz->addr;
return -1;
}
- droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
+ droq->desc_ring_dma = droq->desc_ring_mz->iova;
droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
/* lio_droq_refill
*
- * @param lio_dev - pointer to the lio device structure
* @param droq - droq in which descriptors require new buffers.
*
* Description:
- * This routine is called with droq->lock held.
+ * Refills the droq's descriptor ring with fresh buffers. Returns the
+ * number of descriptors refilled. This routine is called with
+ * droq->lock held.
*/
static uint32_t
-lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
+lio_droq_refill(struct lio_droq *droq)
{
struct lio_droq_desc *desc_ring;
uint32_t desc_refilled = 0;
* reuse the buffer, else allocate.
*/
if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
- buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ buf = rte_pktmbuf_alloc(droq->mpool);
/* If a buffer could not be allocated, no point in
* continuing
*/
- if (buf == NULL)
+ if (buf == NULL) {
+ droq->stats.rx_alloc_failure++;
break;
+ }
droq->recv_buf_list[droq->refill_idx].buffer = buf;
}
droq->refill_count++;
if (likely(nicbuf != NULL)) {
- nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
- nicbuf->nb_segs = 1;
- nicbuf->next = NULL;
/* We don't have a way to pass flags yet */
nicbuf->ol_flags = 0;
if (rh->r_dh.has_hash) {
if (!pkt_len)
first_buf = nicbuf;
- nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
- nicbuf->nb_segs = 1;
- nicbuf->next = NULL;
nicbuf->port = lio_dev->port_id;
/* We don't have a way to pass
* flags yet
}
if (droq->refill_count >= droq->refill_threshold) {
- int desc_refilled = lio_droq_refill(lio_dev, droq);
+ int desc_refilled = lio_droq_refill(droq);
/* Flush the droq descriptor data to memory to be sure
* that when we update the credits the data in memory is
info->length = 0;
info->rh.rh64 = 0;
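+ /* Update receive statistics for this droq. */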
+ droq->stats.pkts_received++;
+ droq->stats.rx_pkts_received += data_pkts;
+ droq->stats.rx_bytes_received += data_total_len;
+ droq->stats.bytes_received += total_len;
+
return data_pkts;
}
return -1;
}
- iq->base_addr_dma = iq->iq_mz->phys_addr;
+ iq->base_addr_dma = iq->iq_mz->iova;
iq->base_addr = (uint8_t *)iq->iq_mz->addr;
iq->max_count = num_descs;
return -1;
}
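+/* Wait for the device to fetch all pending instructions on every enabled
+ * instruction queue, flushing each queue while polling. Retries up to
+ * 1000 times with a 1 ms delay between passes; returns the number of
+ * instructions still pending, i.e. zero on success.
+ */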
+int
+lio_wait_for_instr_fetch(struct lio_device *lio_dev)
+{
+ int pending, instr_cnt;
+ int i, retry = 1000;
+
+ do {
+ instr_cnt = 0;
+
+ for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
+ if (!(lio_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+
+ if (lio_dev->instr_queue[i] == NULL)
+ break;
+
+ pending = rte_atomic64_read(
+ &lio_dev->instr_queue[i]->instr_pending);
+ if (pending)
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
+
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ rte_delay_ms(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
static inline void
lio_ring_doorbell(struct lio_device *lio_dev,
struct lio_instr_queue *iq)
inst_processed = lio_process_iq_request_list(lio_dev, iq);
- if (inst_processed)
+ if (inst_processed) {
rte_atomic64_sub(&iq->instr_pending, inst_processed);
+ iq->stats.instr_processed += inst_processed;
+ }
tot_inst_processed += inst_processed;
inst_processed = 0;
static int
lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
- void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
+ void *buf, uint32_t datasize, uint32_t reqtype)
{
struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
struct lio_iq_post_status st;
if (st.status != LIO_IQ_SEND_FAILED) {
lio_add_to_request_list(iq, st.index, buf, reqtype);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
+ datasize);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
+
lio_ring_doorbell(lio_dev, iq);
+ } else {
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
}
rte_spinlock_unlock(&iq->post_lock);
sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
- sc->dma_addr = rte_mbuf_data_dma_addr(m);
+ sc->dma_addr = rte_mbuf_data_iova(m);
sc->mbuf = m;
dma_addr = sc->dma_addr;
return count ? 0 : 1;
}
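+/* Completion callback for control-packet soft commands: set the control
+ * command's condition flag so the issuer can observe completion, then
+ * free the soft command.
+ */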
+static void
+lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
+{
+ struct lio_soft_command *sc = sc_ptr;
+ struct lio_dev_ctrl_cmd *ctrl_cmd;
+ struct lio_ctrl_pkt *ctrl_pkt;
+
+ ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
+ ctrl_cmd = ctrl_pkt->ctrl_cmd;
+ ctrl_cmd->cond = 1;
+
+ lio_free_soft_command(sc);
+}
+
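+/* Build a soft command carrying the control command plus any user-defined
+ * data (UDD). The command words are swapped to device byte order and the
+ * control packet is saved in the command context for the completion
+ * callback.
+ */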
+static inline struct lio_soft_command *
+lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
+ struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ uint32_t uddsize, datasize;
+ uint32_t rdatasize;
+ uint8_t *data;
+
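+ /* ncmd.s.more holds the size of the UDD in 8-byte words. */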
+ uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);
+
+ datasize = OCTEON_CMD_SIZE + uddsize;
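+ /* Reserve response space only when the caller will wait for completion. */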
+ rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;
+
+ sc = lio_alloc_soft_command(lio_dev, datasize,
+ rdatasize, sizeof(struct lio_ctrl_pkt));
+ if (sc == NULL)
+ return NULL;
+
+ rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));
+
+ data = (uint8_t *)sc->virtdptr;
+
+ rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);
+
+ lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);
+
+ if (uddsize) {
+ /* Endian swap of the UDD should have been done by the caller. */
+ rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
+ }
+
+ sc->iq_no = (uint32_t)ctrl_pkt->iq_no;
+
+ lio_prepare_soft_command(lio_dev, sc,
+ LIO_OPCODE, LIO_OPCODE_CMD,
+ 0, 0, 0);
+
+ sc->callback = lio_ctrl_cmd_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = ctrl_pkt->wait_time;
+
+ return sc;
+}
+
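+/* Send a control packet to the device as a soft command. Returns the
+ * post status from lio_send_soft_command() on success, -1 on allocation
+ * or send failure.
+ */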
+int
+lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ int retval;
+
+ sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
+ if (sc == NULL) {
+ lio_dev_err(lio_dev, "soft command allocation failed\n");
+ return -1;
+ }
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_free_soft_command(sc);
+ lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
+ lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
+ return -1;
+ }
+
+ return retval;
+}
+
/** Send data packet to the device
* @param lio_dev - lio device pointer
- * @param ndata - control structure with queueing, and buffer information
+ * @param ndata - control structure with queueing and buffer information
struct lio_instr_queue *txq = tx_queue;
union lio_cmd_setup cmdsetup;
struct lio_device *lio_dev;
+ struct lio_iq_stats *stats;
struct lio_data_pkt ndata;
int i, processed = 0;
struct rte_mbuf *m;
lio_dev = txq->lio_dev;
iq_no = txq->txpciq.s.q_no;
+ stats = &lio_dev->instr_queue[iq_no]->stats;
if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
ndata.q_no = iq_no;
if (lio_iq_is_full(lio_dev, ndata.q_no)) {
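+ /* Queue is full: record the stall, then try to reclaim completed entries. */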
+ stats->tx_iq_busy++;
if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
PMD_TX_LOG(lio_dev, ERR,
"Transmit failed iq:%d full\n",
if (m->ol_flags & PKT_TX_IP_CKSUM)
cmdsetup.s.ip_csum = 1;
- if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
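+ /* An outer IP checksum request selects the tunnel checksum offload. */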
+ if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cmdsetup.s.tnl_csum = 1;
+ else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
(m->ol_flags & PKT_TX_UDP_CKSUM))
cmdsetup.s.transport_csum = 1;
cmdsetup.s.u.datasize = pkt_len;
lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
&cmdsetup, tag);
- ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+ ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
ndata.reqtype = LIO_REQTYPE_NORESP_NET;
} else {
struct lio_buf_free_info *finfo;
&cmdsetup, tag);
memset(g->sg, 0, g->sg_size);
- g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+ g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[0], m->data_len, 0);
pkt_len = m->data_len;
finfo->mbuf = m;
m = m->next;
while (frags--) {
g->sg[(i >> 2)].ptr[(i & 3)] =
- rte_mbuf_data_dma_addr(m);
+ rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[(i >> 2)],
m->data_len, (i & 3));
pkt_len += m->data_len;
m = m->next;
}
- phyaddr = rte_mem_virt2phy(g->sg);
+ phyaddr = rte_mem_virt2iova(g->sg);
- if (phyaddr == RTE_BAD_PHYS_ADDR) {
+ if (phyaddr == RTE_BAD_IOVA) {
PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
goto xmit_failed;
lio_dev_cleanup_iq(lio_dev, iq_no);
}
+ stats->tx_done++;
+ stats->tx_tot_bytes += pkt_len;
processed++;
}
xmit_failed:
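+ /* Anything not posted to the instruction queue counts as dropped. */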
+ stats->tx_dropped += (nb_pkts - processed);
return processed;
}
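+/* Release all Rx and Tx queues of the port and clear their pointers in
+ * the ethdev data.
+ */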
+void
+lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
+{
+ struct lio_instr_queue *txq;
+ struct lio_droq *rxq;
+ uint16_t i;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq != NULL) {
+ lio_dev_tx_queue_release(txq);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ lio_dev_rx_queue_release(rxq);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+}