diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 003ab06930..d0ea95f1d1 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium networks Ltd. 2016.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium networks nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
  */
 
 #include <unistd.h>
@@ -41,7 +13,7 @@
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <ethdev_driver.h>
 #include <rte_ether.h>
 #include <rte_log.h>
 #include <rte_mbuf.h>
@@ -53,7 +25,7 @@
 #include "nicvf_rxtx.h"
 #include "nicvf_logs.h"
 
-static inline void __hot
+static inline void __rte_hot
 fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
 {
 	/* Local variable sqe to avoid read from sq desc memory*/
@@ -89,7 +61,15 @@ fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
 	entry->buff[0] = sqe.buff[0];
 }
 
-void __hot
+static inline void __rte_hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+				struct rte_mbuf *pkt)
+{
+	fill_sq_desc_header(entry, pkt);
+	entry->buff[1] = 0ULL;
+}
+
+void __rte_hot
 nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
 {
 	int j = 0;
@@ -112,7 +92,7 @@ nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
 	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
 }
 
-void __hot
+void __rte_hot
 nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
 {
 	uint32_t n = 0;
@@ -135,7 +115,7 @@ nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
 	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
 }
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_free_tx_desc(struct nicvf_txq *sq)
 {
 	return ((sq->head - sq->tail - 1) & sq->qlen_mask);
@@ -144,7 +124,7 @@ nicvf_free_tx_desc(struct nicvf_txq *sq)
 /* Send Header + Packet */
 #define TX_DESC_PER_PKT 2
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
 {
@@ -162,7 +142,7 @@ nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
 	return free_desc;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	int i;
@@ -190,16 +170,18 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		free_desc -= TX_DESC_PER_PKT;
 	}
 
-	sq->tail = tail;
-	sq->xmit_bufs += i;
-	rte_wmb();
+	if (likely(i)) {
+		sq->tail = tail;
+		sq->xmit_bufs += i;
+		rte_wmb();
 
-	/* Inform HW to xmit the packets */
-	nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+		/* Inform HW to xmit the packets */
+		nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+	}
 	return i;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 uint16_t nb_pkts)
 {
@@ -230,7 +212,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		used_bufs += nb_segs;
 
 		txbuffs[tail] = NULL;
-		fill_sq_desc_header(desc_ptr + tail, pkt);
+		fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
 		tail = (tail + 1) & qlen_mask;
 
 		txbuffs[tail] = pkt;
@@ -246,13 +228,15 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 	}
 
-	sq->tail = tail;
-	sq->xmit_bufs += used_bufs;
-	rte_wmb();
+	if (likely(used_desc)) {
+		sq->tail = tail;
+		sq->xmit_bufs += used_bufs;
+		rte_wmb();
 
-	/* Inform HW to xmit the packets */
-	nicvf_addr_write(sq->sq_door, used_desc);
-	return nb_pkts;
+		/* Inform HW to xmit the packets */
+		nicvf_addr_write(sq->sq_door, used_desc);
+	}
+	return i;
 }
 
 static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
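
Two behavioral fixes ride along with the mechanical __hot to __rte_hot rename in the Tx hunks above: the multi-segment path now writes its headers through fill_sq_desc_header_zero_w1() so the second SQ entry word starts from a known-zero state and returns i (the number of packets actually queued) instead of nb_pkts, and both transmit paths skip the tail update and SQ doorbell write when a burst queues nothing. The doorbell guard in isolation, as a minimal compilable sketch; every sketch_* name here is an illustrative stand-in, not the driver's real type or API:

#include <stdint.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>

#define SKETCH_DESC_PER_PKT 2	/* header + gather entry, like TX_DESC_PER_PKT */

struct sketch_txq {
	uint32_t tail;
	uint32_t qlen_mask;
	volatile uint64_t *door;	/* mapped doorbell register */
};

uint16_t
sketch_xmit(struct sketch_txq *sq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t i;
	uint32_t tail = sq->tail;

	for (i = 0; i < nb_pkts; i++) {
		(void)tx_pkts[i];	/* descriptor fill elided */
		tail = (tail + SKETCH_DESC_PER_PKT) & sq->qlen_mask;
	}

	/*
	 * Publish the tail and ring the doorbell only when something was
	 * queued; an empty burst must not cost an MMIO write per poll.
	 */
	if (likely(i)) {
		sq->tail = tail;
		rte_wmb();	/* descriptors visible to HW before the kick */
		*sq->door = (uint64_t)i * SKETCH_DESC_PER_PKT;
	}
	return i;
}

The rte_wmb() before the doorbell is what makes the guard safe: the filled descriptors must be globally visible before the hardware is told to fetch them, exactly as in the patched nicvf_xmit_pkts() above.
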
@@ -349,13 +333,27 @@ static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
 	[L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
 };
 
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
 nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
 {
 	return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
 }
 
-static inline int __hot
+static inline uint64_t __rte_hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+	static const uint64_t flag_table[3] __rte_cache_aligned = {
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+	};
+
+	const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+		(cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+	return flag_table[idx];
+}
+
+static inline int __rte_hot
 nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 {
 	int i;
@@ -387,15 +385,14 @@ nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 		ltail++;
 	}
 
-	while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail)
-		rte_pause();
+	rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
 
 	__atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
 	nicvf_addr_write(door, to_fill);
 	return to_fill;
 }
 
-static inline int32_t __hot
+static inline int32_t __rte_hot
 nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
 			 int32_t available_space)
 {
@@ -406,18 +403,20 @@ nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
 	return RTE_MIN(nb_pkts, available_space);
 }
 
-static inline void __hot
+static inline void __rte_hot
 nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
 		 struct rte_mbuf *pkt)
 {
 	if (likely(cqe_rx_w0.rss_alg)) {
 		pkt->hash.rss = cqe_rx_w2.rss_tag;
 		pkt->ol_flags |= PKT_RX_RSS_HASH;
+	}
 }
 
-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+	const uint32_t flag)
 {
 	uint32_t i, to_process;
 	struct cqe_rx_t *cqe_rx;
@@ -448,7 +447,19 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
 		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 				(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-		pkt->ol_flags = 0;
+
+		if (flag & NICVF_RX_OFFLOAD_NONE)
+			pkt->ol_flags = 0;
+		if (flag & NICVF_RX_OFFLOAD_CKSUM)
+			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+						| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
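
The Rx rework above hinges on nicvf_recv_pkts() becoming static __rte_always_inline with a const uint32_t flag parameter. Every exported entry point passes a compile-time constant, so each if (flag & ...) test constant-folds away and the compiler emits one specialized receive loop per offload combination, with no per-packet offload branches. A minimal compilable sketch of the pattern, using hypothetical SKETCH_*/sketch_* names in place of the driver's, and the pre-21.11 PKT_RX_* mbuf flags to match this file's era:

#include <stdint.h>

#include <rte_common.h>
#include <rte_mbuf.h>

#define SKETCH_OFFLOAD_NONE	0x1u
#define SKETCH_OFFLOAD_CKSUM	0x2u

static __rte_always_inline uint16_t
sketch_recv(struct rte_mbuf **rx_pkts, uint16_t nb_pkts, const uint32_t flag)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		/* `flag` is a literal at every call site below, so each
		 * untaken test is deleted at compile time rather than
		 * evaluated per packet. */
		if (flag & SKETCH_OFFLOAD_NONE)
			rx_pkts[i]->ol_flags = 0;
		if (flag & SKETCH_OFFLOAD_CKSUM)
			rx_pkts[i]->ol_flags = PKT_RX_IP_CKSUM_GOOD;
	}
	return i;
}

/* One thin exported wrapper per offload combination, as the patch does. */
uint16_t
sketch_recv_no_offload(struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	return sketch_recv(rx_pkts, nb_pkts, SKETCH_OFFLOAD_NONE);
}

uint16_t
sketch_recv_cksum(struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	return sketch_recv(rx_pkts, nb_pkts, SKETCH_OFFLOAD_CKSUM);
}

The added nicvf_set_olflags() applies the same branch-avoidance idea to checksum status: the CQE error opcode is folded into an index into a three-entry flag table instead of being tested per packet.
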
@@ -464,21 +475,52 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxq->head = cqe_head;
 		nicvf_addr_write(rxq->cq_door, to_process);
 		rxq->recv_buffers += to_process;
-		if (rxq->recv_buffers > rxq->rx_free_thresh) {
-			rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
-						rxq->rx_free_thresh);
-			NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
-		}
+	}
+	if (rxq->recv_buffers > rxq->rx_free_thresh) {
+		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
 	}
 
 	return to_process;
 }
 
-static inline uint16_t __hot
+uint16_t __rte_hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t __rte_hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
 			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
-			uint64_t mbuf_init)
+			uint64_t mbuf_init, const uint32_t flag)
 {
 	struct rte_mbuf *pkt, *seg, *prev;
 	cqe_rx_word0_t cqe_rx_w0;
@@ -496,12 +538,22 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 			(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
 
-	pkt->ol_flags = 0;
 	pkt->pkt_len = cqe_rx_w1.pkt_len;
 	pkt->data_len = rb_sz[nicvf_frag_num(0)];
 	nicvf_mbuff_init_mseg_update(
 				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
 	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_NONE)
+		pkt->ol_flags = 0;
+	if (flag & NICVF_RX_OFFLOAD_CKSUM)
+		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+				| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 
 	*rx_pkt = pkt;
@@ -520,9 +572,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	return nb_segs;
 }
 
-uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-			 uint16_t nb_pkts)
+			 uint16_t nb_pkts, const uint32_t flag)
 {
 	union cq_entry_t *cq_entry;
 	struct cqe_rx_t *cqe_rx;
@@ -544,7 +596,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cq_entry = &desc[cqe_head];
 		cqe_rx = (struct cqe_rx_t *)cq_entry;
 		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
-			rx_pkts + i, rbptr_offset, mbuf_init);
+			rx_pkts + i, rbptr_offset, mbuf_init, flag);
 		buffers_consumed += nb_segs;
 		cqe_head = (cqe_head + 1) & cqe_mask;
 		nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -555,16 +607,47 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxq->head = cqe_head;
 		nicvf_addr_write(rxq->cq_door, to_process);
 		rxq->recv_buffers += buffers_consumed;
-		if (rxq->recv_buffers > rxq->rx_free_thresh) {
-			rxq->recv_buffers -=
-				nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
-			NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
-		}
+	}
+	if (rxq->recv_buffers > rxq->rx_free_thresh) {
+		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
 	}
 
 	return to_process;
 }
 
+uint16_t __rte_hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __rte_hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
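
One hunk further back also deserves a note: in nicvf_fill_rbdr(), the open-coded relaxed polling loop is replaced by rte_wait_until_equal_32(). The semantics are unchanged, but arm64 builds with RTE_ARM_USE_WFE enabled may park the core on a WFE event wait instead of busy-spinning. Roughly, as a compilable sketch assuming a shared tail word like the driver's rbdr->tail (sketch_* names are hypothetical):

#include <stdint.h>

#include <rte_pause.h>

/* Stand-in for the shared rbdr->tail word that concurrent fillers bump. */
static uint32_t sketch_tail;

void
sketch_wait_turn_and_publish(uint32_t next_tail, uint32_t ltail)
{
	/*
	 * Form removed by the patch: an explicit relaxed polling loop,
	 *
	 *	while (__atomic_load_n(&sketch_tail, __ATOMIC_RELAXED)
	 *			!= next_tail)
	 *		rte_pause();
	 *
	 * Replacement: identical semantics, but able to use a WFE-based
	 * wait on arm64 instead of spinning.
	 */
	rte_wait_until_equal_32(&sketch_tail, next_tail, __ATOMIC_RELAXED);

	/* Release-store the new tail so the refill is visible to the next
	 * waiter, mirroring the driver's store to rbdr->tail. */
	__atomic_store_n(&sketch_tail, ltail, __ATOMIC_RELEASE);
}
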