diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 2a47a282ad..6ecb500b9e 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -67,19 +67,20 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
 }
 
-static inline void __rte_cold
-ionic_tx_flush(struct ionic_cq *cq)
+static __rte_always_inline void
+ionic_tx_flush(struct ionic_qcq *txq)
 {
-	struct ionic_queue *q = cq->bound_q;
-	struct ionic_desc_info *q_desc_info;
+	struct ionic_cq *cq = &txq->cq;
+	struct ionic_queue *q = &txq->q;
 	struct rte_mbuf *txm, *next;
 	struct ionic_txq_comp *cq_desc_base = cq->base;
 	struct ionic_txq_comp *cq_desc;
+	void **info;
 	u_int32_t comp_index = (u_int32_t)-1;
 
 	cq_desc = &cq_desc_base[cq->tail_idx];
 	while (color_match(cq_desc->color, cq->done_color)) {
-		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
 
 		/* Prefetch the next 4 descriptors (not really useful here) */
 		if ((cq->tail_idx & 0x3) == 0)
@@ -95,9 +96,9 @@ ionic_tx_flush(struct ionic_cq *cq)
 
 	if (comp_index != (u_int32_t)-1) {
 		while (q->tail_idx != comp_index) {
-			q_desc_info = &q->info[q->tail_idx];
+			info = IONIC_INFO_PTR(q, q->tail_idx);
 
-			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
 
 			/* Prefetch the next 4 descriptors */
 			if ((q->tail_idx & 0x3) == 0)
@@ -108,7 +109,7 @@ ionic_tx_flush(struct ionic_cq *cq)
 			 * Note: you can just use rte_pktmbuf_free,
 			 * but this loop is faster
 			 */
-			txm = q_desc_info->cb_arg;
+			txm = info[0];
 			while (txm != NULL) {
 				next = txm->next;
 				rte_pktmbuf_free_seg(txm);
@@ -149,7 +150,7 @@ ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 
 	ionic_qcq_disable(txq);
 
-	ionic_tx_flush(&txq->cq);
+	ionic_tx_flush(txq);
 
 	return 0;
 }
@@ -310,16 +311,16 @@ ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 	desc->hdr_len = hdrlen;
 	desc->mss = mss;
 
-	ionic_q_post(q, done, NULL, done ? txm : NULL);
+	ionic_q_post(q, done, done ? txm : NULL);
 }
 
 static struct ionic_txq_desc *
 ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
 {
 	struct ionic_txq_desc *desc_base = q->base;
-	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
+	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
 	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
-	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
+	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];
 
 	*elem = sg_desc->elems;
 	return desc;
@@ -334,7 +335,8 @@ ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
 	struct ionic_txq_desc *desc;
 	struct ionic_txq_sg_elem *elem;
 	struct rte_mbuf *txm_seg;
-	uint64_t desc_addr = 0;
+	rte_iova_t data_iova;
+	uint64_t desc_addr = 0, next_addr;
 	uint16_t desc_len = 0;
 	uint8_t desc_nsge;
 	uint32_t hdrlen;
@@ -371,6 +373,7 @@ ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
 
 	seglen = hdrlen + mss;
 	left = txm->data_len;
+	data_iova = rte_mbuf_data_iova(txm);
 
 	desc = ionic_tx_tso_next(q, &elem);
 	start = true;
@@ -380,7 +383,7 @@ ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
 	while (left > 0) {
 		len = RTE_MIN(seglen, left);
 		frag_left = seglen - len;
-		desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+		desc_addr = rte_cpu_to_le_64(data_iova + offset);
 		desc_len = len;
 		desc_nsge = 0;
 		left -= len;
@@ -404,24 +407,23 @@ ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
 	txm_seg = txm->next;
 	while (txm_seg != NULL) {
 		offset = 0;
+		data_iova = rte_mbuf_data_iova(txm_seg);
 		left = txm_seg->data_len;
 		stats->frags++;
 
 		while (left > 0) {
-			rte_iova_t data_iova;
-			data_iova = rte_mbuf_data_iova(txm_seg);
-			elem->addr = rte_cpu_to_le_64(data_iova) + offset;
+			next_addr = rte_cpu_to_le_64(data_iova + offset);
 			if (frag_left > 0) {
 				len = RTE_MIN(frag_left, left);
 				frag_left -= len;
+				elem->addr = next_addr;
 				elem->len = len;
 				elem++;
 				desc_nsge++;
 			} else {
 				len = RTE_MIN(mss, left);
 				frag_left = mss - len;
-				data_iova = rte_mbuf_data_iova(txm_seg);
-				desc_addr = rte_cpu_to_le_64(data_iova);
+				desc_addr = next_addr;
 				desc_len = len;
 				desc_nsge = 0;
 			}
@@ -429,6 +431,7 @@ ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
 			offset += len;
 			if (txm_seg->next != NULL && frag_left > 0)
 				continue;
+			done = (txm_seg->next == NULL && left == 0);
 			ionic_tx_tso_post(q, desc, txm_seg,
 				desc_addr, desc_nsge, desc_len,
@@ -448,22 +451,22 @@ ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
 	return 0;
 }
 
-static int
+static __rte_always_inline int
 ionic_tx(struct ionic_qcq *txq, struct rte_mbuf *txm,
 		bool not_xmit_more)
 {
 	struct ionic_queue *q = &txq->q;
 	struct ionic_txq_desc *desc_base = q->base;
-	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
+	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
 	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
-	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
+	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];
 	struct ionic_txq_sg_elem *elem = sg_desc->elems;
 	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
 	struct rte_mbuf *txm_seg;
 	bool encap;
 	bool has_vlan;
 	uint64_t ol_flags = txm->ol_flags;
-	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
+	uint64_t addr;
 	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
 	uint8_t flags = 0;
 
@@ -493,6 +496,8 @@ ionic_tx(struct ionic_qcq *txq, struct rte_mbuf *txm,
 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
 
+	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
+
 	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
 	desc->len = txm->data_len;
 	desc->vlan_tci = txm->vlan_tci;
@@ -506,7 +511,7 @@ ionic_tx(struct ionic_qcq *txq, struct rte_mbuf *txm,
 		txm_seg = txm_seg->next;
 	}
 
-	ionic_q_post(q, not_xmit_more, NULL, txm);
+	ionic_q_post(q, not_xmit_more, txm);
 
 	return 0;
 }
@@ -517,7 +522,6 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
 	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
 	struct ionic_queue *q = &txq->q;
-	struct ionic_cq *cq = &txq->cq;
 	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
 	uint32_t next_q_head_idx;
 	uint32_t bytes_tx = 0;
@@ -526,7 +530,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	bool last;
 
 	/* Cleaning old buffers */
-	ionic_tx_flush(cq);
+	ionic_tx_flush(txq);
 
 	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
 		stats->stop += nb_pkts;
@@ -536,7 +540,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	while (nb_tx < nb_pkts) {
 		last = (nb_tx == (nb_pkts - 1));
 
-		next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
+		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
 		if ((next_q_head_idx & 0x3) == 0) {
 			struct ionic_txq_desc *desc_base = q->base;
 			rte_prefetch0(&desc_base[next_q_head_idx]);
@@ -592,7 +596,7 @@ ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
 	for (i = 0; i < nb_pkts; i++) {
 		txm = tx_pkts[i];
 
-		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
+		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS_V1 + 1) {
 			rte_errno = -EINVAL;
 			break;
 		}
@@ -635,15 +639,15 @@ static void __rte_cold
 ionic_rx_empty(struct ionic_queue *q)
 {
 	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
-	struct ionic_desc_info *cur;
 	struct rte_mbuf *mbuf;
+	void **info;
 
 	while (q->tail_idx != q->head_idx) {
-		cur = &q->info[q->tail_idx];
-		mbuf = cur->cb_arg;
+		info = IONIC_INFO_PTR(q, q->tail_idx);
+		mbuf = info[0];
 		rte_mempool_put(rxq->mb_pool, mbuf);
 
-		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
 	}
 }
 
@@ -736,16 +740,16 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
-static void
-ionic_rx_clean(struct ionic_queue *q,
+static __rte_always_inline void
+ionic_rx_clean(struct ionic_qcq *rxq,
 		uint32_t q_desc_index, uint32_t cq_desc_index,
-		void *cb_arg, void *service_cb_arg)
+		void *service_cb_arg)
 {
-	struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
+	struct ionic_queue *q = &rxq->q;
+	struct ionic_cq *cq = &rxq->cq;
+	struct ionic_rxq_comp *cq_desc_base = cq->base;
 	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
-	struct rte_mbuf *rxm = cb_arg;
-	struct rte_mbuf *rxm_seg;
-	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
+	struct rte_mbuf *rxm, *rxm_seg;
 	uint32_t max_frame_size =
 		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	uint64_t pkt_flags = 0;
@@ -757,6 +761,13 @@ ionic_rx_clean(struct ionic_queue *q,
 		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
 		RTE_PKTMBUF_HEADROOM);
 	uint32_t left;
+	void **info;
+
+	assert(q_desc_index == cq_desc->comp_index);
+
+	info = IONIC_INFO_PTR(q, cq_desc->comp_index);
+
+	rxm = info[0];
 
 	if (!recv_args) {
 		stats->no_cb_arg++;
@@ -894,10 +905,10 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
 	new->addr = old->addr;
 	new->len = old->len;
 
-	ionic_q_post(q, true, ionic_rx_clean, mbuf);
+	ionic_q_post(q, true, mbuf);
 }
 
-static int __rte_cold
+static __rte_always_inline int
 ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
 {
 	struct ionic_queue *q = &rxq->q;
@@ -965,7 +976,7 @@ ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
 		ring_doorbell = ((q->head_idx + 1) &
 			IONIC_RX_RING_DOORBELL_STRIDE) == 0;
 
-		ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
+		ionic_q_post(q, ring_doorbell, rxm);
 	}
 
 	return 0;
@@ -1013,12 +1024,12 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	return 0;
 }
 
-static inline void __rte_cold
-ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
+static __rte_always_inline void
+ionic_rxq_service(struct ionic_qcq *rxq, uint32_t work_to_do,
 		void *service_cb_arg)
 {
-	struct ionic_queue *q = cq->bound_q;
-	struct ionic_desc_info *q_desc_info;
+	struct ionic_cq *cq = &rxq->cq;
+	struct ionic_queue *q = &rxq->q;
 	struct ionic_rxq_comp *cq_desc_base = cq->base;
 	struct ionic_rxq_comp *cq_desc;
 	bool more;
@@ -1031,7 +1042,7 @@ ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
 	cq_desc = &cq_desc_base[cq->tail_idx];
 	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
 		curr_cq_tail_idx = cq->tail_idx;
-		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
 
 		if (cq->tail_idx == 0)
 			cq->done_color = !cq->done_color;
@@ -1043,18 +1054,16 @@ ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
 		do {
 			more = (q->tail_idx != cq_desc->comp_index);
 
-			q_desc_info = &q->info[q->tail_idx];
-
 			curr_q_tail_idx = q->tail_idx;
-			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
 
 			/* Prefetch the next 4 descriptors */
 			if ((q->tail_idx & 0x3) == 0)
 				/* q desc info */
 				rte_prefetch0(&q->info[q->tail_idx]);
 
-			ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
-				q_desc_info->cb_arg, service_cb_arg);
+			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
+				service_cb_arg);
 
 		} while (more);
@@ -1083,7 +1092,7 @@ ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	ionic_qcq_disable(rxq);
 
 	/* Flush */
-	ionic_rxq_service(&rxq->cq, -1, NULL);
+	ionic_rxq_service(rxq, -1, NULL);
 
 	return 0;
 }
@@ -1095,14 +1104,13 @@ ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
 	uint32_t frame_size =
 		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
-	struct ionic_cq *cq = &rxq->cq;
 	struct ionic_rx_service service_cb_arg;
 
 	service_cb_arg.rx_pkts = rx_pkts;
 	service_cb_arg.nb_pkts = nb_pkts;
 	service_cb_arg.nb_rx = 0;
 
-	ionic_rxq_service(cq, nb_pkts, &service_cb_arg);
+	ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);
 
 	ionic_rx_fill(rxq, frame_size);