qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
-static inline void __rte_cold
+static __rte_always_inline void
ionic_tx_flush(struct ionic_cq *cq)
{
struct ionic_queue *q = cq->bound_q;
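For reference, the datapath helpers in this patch trade __rte_cold, which tells
the compiler a function is rarely executed (optimize it for size, place it out
of line), for forced inlining. Both attributes are defined in DPDK's
rte_common.h:

    /* From rte_common.h */
    #define __rte_always_inline inline __attribute__((always_inline))
    #define __rte_cold __attribute__((cold))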
@@ ... @@
ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
{
struct ionic_txq_desc *desc_base = q->base;
- struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
+ struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
struct ionic_txq_desc *desc = &desc_base[q->head_idx];
- struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
+ struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];
*elem = sg_desc->elems;
return desc;
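The casts to ionic_txq_sg_desc_v1 move the scatter-gather ring to the
version-1 descriptor layout, which packs more SG elements per Tx descriptor
than the original format. A rough sketch of the two layouts (element counts
here are recalled from the kernel ionic driver's interface header and are
illustrative, not authoritative):

    struct ionic_txq_sg_elem {
        __le64 addr;
        __le16 len;
        __le16 rsvd[3];
    };

    /* Original layout: up to 8 elements per descriptor. */
    struct ionic_txq_sg_desc {
        struct ionic_txq_sg_elem elems[8];
    };

    /* V1 layout: 16-element stride, 15 usable per descriptor. */
    struct ionic_txq_sg_desc_v1 {
        struct ionic_txq_sg_elem elems[16];
    };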
@@ ... @@
struct ionic_txq_desc *desc;
struct ionic_txq_sg_elem *elem;
struct rte_mbuf *txm_seg;
- uint64_t desc_addr = 0;
+ rte_iova_t data_iova;
+ uint64_t desc_addr = 0, next_addr;
uint16_t desc_len = 0;
uint8_t desc_nsge;
uint32_t hdrlen;
@@ ... @@
seglen = hdrlen + mss;
left = txm->data_len;
+ data_iova = rte_mbuf_data_iova(txm);
desc = ionic_tx_tso_next(q, &elem);
start = true;
while (left > 0) {
len = RTE_MIN(seglen, left);
frag_left = seglen - len;
- desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+ desc_addr = rte_cpu_to_le_64(data_iova + offset);
desc_len = len;
desc_nsge = 0;
left -= len;
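Two things change in this first loop: the IOVA is read once, before the loop,
and the running offset is now actually applied to it (the old code re-read the
start address on every pass and never added the offset). The helper changes
too: rte_mbuf_data_iova() honors the mbuf's current data offset, while
rte_mbuf_data_iova_default() assumes the default headroom. From rte_mbuf.h:

    static inline rte_iova_t
    rte_mbuf_data_iova(const struct rte_mbuf *mb)
    {
        return mb->buf_iova + mb->data_off;
    }

    static inline rte_iova_t
    rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
    {
        return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
    }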
@@ ... @@
txm_seg = txm->next;
while (txm_seg != NULL) {
offset = 0;
+ data_iova = rte_mbuf_data_iova(txm_seg);
left = txm_seg->data_len;
stats->frags++;
while (left > 0) {
- rte_iova_t data_iova;
- data_iova = rte_mbuf_data_iova(txm_seg);
- elem->addr = rte_cpu_to_le_64(data_iova) + offset;
+ next_addr = rte_cpu_to_le_64(data_iova + offset);
if (frag_left > 0) {
len = RTE_MIN(frag_left, left);
frag_left -= len;
+ elem->addr = next_addr;
elem->len = len;
elem++;
desc_nsge++;
} else {
len = RTE_MIN(mss, left);
frag_left = mss - len;
- data_iova = rte_mbuf_data_iova(txm_seg);
- desc_addr = rte_cpu_to_le_64(data_iova);
+ desc_addr = next_addr;
desc_len = len;
desc_nsge = 0;
}
offset += len;
if (txm_seg->next != NULL && frag_left > 0)
continue;
+
done = (txm_seg->next == NULL && left == 0);
ionic_tx_tso_post(q, desc, txm_seg,
desc_addr, desc_nsge, desc_len,
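Folding the addition into next_addr also fixes the order of operations around
the byte swap: the old code converted first and added the offset afterward
(rte_cpu_to_le_64(data_iova) + offset), which is only correct where the
conversion is a no-op. A minimal sketch of the difference (hypothetical helper
names, illustration only):

    #include <rte_byteorder.h>
    #include <rte_common.h>

    static uint64_t
    seg_addr_old(rte_iova_t iova, uint32_t off)
    {
        /* Swap, then add: on big-endian hosts the swapped value is no
         * longer a number in CPU order, so the offset lands in the
         * wrong byte lanes.
         */
        return rte_cpu_to_le_64(iova) + off;
    }

    static uint64_t
    seg_addr_new(rte_iova_t iova, uint32_t off)
    {
        /* Add in CPU order, then convert once: correct everywhere. */
        return rte_cpu_to_le_64(iova + off);
    }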
@@ ... @@
return 0;
}
-static int
+static __rte_always_inline int
ionic_tx(struct ionic_qcq *txq, struct rte_mbuf *txm,
bool not_xmit_more)
{
struct ionic_queue *q = &txq->q;
struct ionic_txq_desc *desc_base = q->base;
- struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
+ struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
struct ionic_txq_desc *desc = &desc_base[q->head_idx];
- struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
+ struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
struct rte_mbuf *txm_seg;
bool encap;
bool has_vlan;
uint64_t ol_flags = txm->ol_flags;
- uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+ uint64_t addr;
uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
uint8_t flags = 0;
@@ ... @@
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
+ addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
+
desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
desc->len = txm->data_len;
desc->vlan_tci = txm->vlan_tci;
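As in the TSO path, the address now comes from rte_mbuf_data_iova() and is
computed only at the point the descriptor is written. The conversion itself is
free on little-endian targets, where rte_byteorder.h reduces it to an identity:

    /* From rte_byteorder.h on little-endian builds */
    #define rte_cpu_to_le_64(x) (x)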
@@ ... @@
return 0;
}
-static void
+static __rte_always_inline void
ionic_rx_clean(struct ionic_queue *q,
uint32_t q_desc_index, uint32_t cq_desc_index,
void *cb_arg, void *service_cb_arg)
@@ ... @@
ionic_q_post(q, true, ionic_rx_clean, mbuf);
}
-static int __rte_cold
+static __rte_always_inline int
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
struct ionic_queue *q = &rxq->q;
@@ ... @@
return 0;
}
-static inline void __rte_cold
+static __rte_always_inline void
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
void *service_cb_arg)
{
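The Rx helpers get the same treatment: ionic_rx_fill() and ionic_rxq_service()
are called directly from the Rx burst path, so forcing inlining removes the
per-burst call overhead (ionic_rx_clean() is additionally reached through the
callback pointer passed to ionic_q_post() above, so an out-of-line copy still
exists for that use). A loose sketch of the burst entry point that benefits,
assuming the driver's ionic_rx_service context struct (shape recalled from the
driver, simplified here):

    uint16_t
    ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
            uint16_t nb_pkts)
    {
        struct ionic_qcq *rxq = rx_queue;
        struct ionic_rx_service service_cb_arg;

        service_cb_arg.rx_pkts = rx_pkts;
        service_cb_arg.nb_pkts = nb_pkts;
        service_cb_arg.nb_rx = 0;

        /* Expanded inline into this function after the change. */
        ionic_rxq_service(&rxq->cq, nb_pkts, &service_cb_arg);

        return service_cb_arg.nb_rx;
    }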