X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fark%2Fark_ethdev_tx.c;h=676e4115d3bf700d370368720170c5aa39953014;hb=0475c7770502cb4166b2577df3ff446af9d85515;hp=a0e35af8805b5cebd201d8dfd8907b8dfb840768;hpb=e274fbfe321df303df4e06cbaa5407ce2170df75;p=dpdk.git

diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index a0e35af880..676e4115d3 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2015-2018 Atomic Rules LLC
+ * Copyright (c) 2015-2021 Atomic Rules LLC
  */
 
 #include <unistd.h>
@@ -22,7 +22,7 @@
 
 /* ************************************************************************* */
 struct ark_tx_queue {
-	struct ark_tx_meta *meta_q;
+	union ark_tx_meta *meta_q;
 	struct rte_mbuf **bufs;
 
 	/* handles for hw objects */
@@ -32,28 +32,30 @@ struct ark_tx_queue {
 	/* Stats HW tracks bytes and packets, need to count send errors */
 	uint64_t tx_errors;
 
+	tx_user_meta_hook_fn tx_user_meta_hook;
+	void *ext_user_data;
+
 	uint32_t queue_size;
 	uint32_t queue_mask;
 
 	/* 3 indexes to the paired data rings. */
-	uint32_t prod_index;		/* where to put the next one */
-	uint32_t free_index;		/* mbuf has been freed */
+	int32_t prod_index;		/* where to put the next one */
+	int32_t free_index;		/* mbuf has been freed */
 
 	/* The queue Id is used to identify the HW Q */
 	uint16_t phys_qid;
 	/* The queue Index within the dpdk device structures */
 	uint16_t queue_index;
 
-	uint32_t pad[1];
-
-	/* second cache line - fields only used in slow path */
+	/* next cache line - fields written by device */
 	RTE_MARKER cacheline1 __rte_cache_min_aligned;
-	uint32_t cons_index;		/* hw is done, can be freed */
+	volatile int32_t cons_index;		/* hw is done, can be freed */
 } __rte_cache_aligned;
 
 /* Forward declarations */
-static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
-				 struct rte_mbuf *mbuf);
+static int eth_ark_tx_jumbo(struct ark_tx_queue *queue,
+			    struct rte_mbuf *mbuf,
+			    uint32_t *user_meta, uint8_t meta_cnt);
 static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
 static void free_completed_tx(struct ark_tx_queue *queue);
 
@@ -65,16 +67,44 @@ ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
 
 /* ************************************************************************* */
 static inline void
-eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
-			  const struct rte_mbuf *mbuf,
-			  uint8_t flags)
+eth_ark_tx_desc_fill(struct ark_tx_queue *queue,
+		     struct rte_mbuf *mbuf,
+		     uint8_t flags,
+		     uint32_t *user_meta,
+		     uint8_t meta_cnt /* 0 to 5 */
+		     )
 {
-	meta->physaddr = rte_mbuf_data_iova(mbuf);
-	meta->user1 = (uint32_t)mbuf->udata64;
+	uint32_t tx_idx;
+	union ark_tx_meta *meta;
+	uint8_t m;
+
+	/* Header */
+	tx_idx = queue->prod_index & queue->queue_mask;
+	meta = &queue->meta_q[tx_idx];
 	meta->data_len = rte_pktmbuf_data_len(mbuf);
 	meta->flags = flags;
+	meta->meta_cnt = meta_cnt / 2;
+	meta->user1 = meta_cnt ? (*user_meta++) : 0;
+	queue->prod_index++;
+
+	queue->bufs[tx_idx] = mbuf;
+
+	/* 1 or 2 user meta data entries, user words 1,2 and 3,4 */
+	for (m = 1; m < meta_cnt; m += 2) {
+		tx_idx = queue->prod_index & queue->queue_mask;
+		meta = &queue->meta_q[tx_idx];
+		meta->usermeta0 = *user_meta++;
+		meta->usermeta1 = *user_meta++;
+		queue->prod_index++;
+	}
+
+	tx_idx = queue->prod_index & queue->queue_mask;
+	meta = &queue->meta_q[tx_idx];
+	meta->physaddr = rte_mbuf_data_iova(mbuf);
+	queue->prod_index++;
 }
 
+
 /* ************************************************************************* */
 uint16_t
 eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
@@ -90,23 +120,26 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct ark_tx_queue *queue;
 	struct rte_mbuf *mbuf;
-	struct ark_tx_meta *meta;
+	uint32_t user_meta[5];
 
-	uint32_t idx;
-	uint32_t prod_index_limit;
 	int stat;
+	int32_t prod_index_limit;
 	uint16_t nb;
+	uint8_t user_len = 0;
 	const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;
+	tx_user_meta_hook_fn tx_user_meta_hook;
 
 	queue = (struct ark_tx_queue *)vtxq;
+	tx_user_meta_hook = queue->tx_user_meta_hook;
 
 	/* free any packets after the HW is done with them */
 	free_completed_tx(queue);
 
-	prod_index_limit = queue->queue_size + queue->free_index;
+	/* leave 4 elements mpu data */
+	prod_index_limit = queue->queue_size + queue->free_index - 4;
 
 	for (nb = 0;
-	     (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
+	     (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0;
 	     ++nb) {
 		mbuf = tx_pkts[nb];
 
@@ -132,19 +165,18 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			memset(appended, 0, to_add);
 		}
 
+		if (tx_user_meta_hook)
+			tx_user_meta_hook(mbuf, user_meta, &user_len,
+					  queue->ext_user_data);
 		if (unlikely(mbuf->nb_segs != 1)) {
-			stat = eth_ark_tx_jumbo(queue, mbuf);
+			stat = eth_ark_tx_jumbo(queue, mbuf,
+						user_meta, user_len);
 			if (unlikely(stat != 0))
 				break;		/* Queue is full */
 		} else {
-			idx = queue->prod_index & queue->queue_mask;
-			queue->bufs[idx] = mbuf;
-			meta = &queue->meta_q[idx];
-			eth_ark_tx_meta_from_mbuf(meta,
-						  mbuf,
-						  ARK_DDM_SOP |
-						  ARK_DDM_EOP);
-			queue->prod_index++;
+			eth_ark_tx_desc_fill(queue, mbuf,
+					     ARK_DDM_SOP | ARK_DDM_EOP,
+					     user_meta, user_len);
 		}
 	}
 
@@ -172,32 +204,28 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 }
 
 /* ************************************************************************* */
-static uint32_t
-eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
+static int
+eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf,
+		 uint32_t *user_meta, uint8_t meta_cnt)
 {
 	struct rte_mbuf *next;
-	struct ark_tx_meta *meta;
-	uint32_t free_queue_space;
-	uint32_t idx;
+	int32_t free_queue_space;
 	uint8_t flags = ARK_DDM_SOP;
 
 	free_queue_space = queue->queue_mask -
 		(queue->prod_index - queue->free_index);
-	if (unlikely(free_queue_space < mbuf->nb_segs))
+	/* We need up to 4 mbufs for first header and 2 for subsequent ones */
+	if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs))))
 		return -1;
 
 	while (mbuf != NULL) {
 		next = mbuf->next;
-
-		idx = queue->prod_index & queue->queue_mask;
-		queue->bufs[idx] = mbuf;
-		meta = &queue->meta_q[idx];
-
 		flags |= (next == NULL) ? ARK_DDM_EOP : 0;
-		eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
-		queue->prod_index++;
+
+		eth_ark_tx_desc_fill(queue, mbuf, flags, user_meta, meta_cnt);
 
 		flags &= ~ARK_DDM_SOP;	/* drop SOP flags */
+		meta_cnt = 0;	/* Meta only on SOP */
 		mbuf = next;
 	}
 
@@ -226,6 +254,9 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
+	/* Each packet requires at least 2 mpu elements - double desc count */
+	nb_desc = 2 * nb_desc;
+
 	/* Allocate queue struct */
 	queue = rte_zmalloc_socket("Ark_txqueue",
 				   sizeof(struct ark_tx_queue),
@@ -244,10 +275,12 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
 	queue->phys_qid = qidx;
 	queue->queue_index = queue_idx;
 	dev->data->tx_queues[queue_idx] = queue;
+	queue->tx_user_meta_hook = ark->user_ext.tx_user_meta_hook;
+	queue->ext_user_data = ark->user_data[dev->data->port_id];
 
 	queue->meta_q =
 		rte_zmalloc_socket("Ark_txqueue meta",
-				   nb_desc * sizeof(struct ark_tx_meta),
+				   nb_desc * sizeof(union ark_tx_meta),
 				   64,
 				   socket_id);
 	queue->bufs =
@@ -288,7 +321,7 @@ eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
 	uint32_t write_interval_ns;
 
 	/* Verify HW -- MPU */
-	if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
+	if (ark_mpu_verify(queue->mpu, sizeof(union ark_tx_meta)))
 		return -1;
 
 	queue_base = rte_malloc_virt2iova(queue);
@@ -390,19 +423,19 @@ static void
 free_completed_tx(struct ark_tx_queue *queue)
 {
 	struct rte_mbuf *mbuf;
-	struct ark_tx_meta *meta;
-	uint32_t top_index;
+	union ark_tx_meta *meta;
+	int32_t top_index;
 
 	top_index = queue->cons_index;	/* read once */
-	while (queue->free_index != top_index) {
+	while ((top_index - queue->free_index) > 0) {
 		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
-		mbuf = queue->bufs[queue->free_index & queue->queue_mask];
-
 		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+			mbuf = queue->bufs[queue->free_index &
+					   queue->queue_mask];
 			/* ref count of the mbuf is checked in this call. */
 			rte_pktmbuf_free(mbuf);
 		}
-		queue->free_index++;
+		queue->free_index += (meta->meta_cnt + 2);
 	}
 }
 
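
Notes on the patch follow; the C sketches use hypothetical helper names and assumed values, and are commentary derived from the diff, not code from the tree.

Each mbuf segment now occupies a variable number of MPU ring slots instead of exactly one: a header slot carrying data_len, flags, meta_cnt, and the first user word (user1); one slot for each remaining pair of user-meta words; and a final slot carrying the segment's physical address. That is why free_completed_tx() advances free_index by meta->meta_cnt + 2 rather than by 1, why eth_ark_tx_jumbo() reserves 2 + 2 * nb_segs slots (up to 4 for the SOP segment, which alone carries user meta, and 2 for each later segment), and why eth_ark_tx_queue_setup() doubles nb_desc. The helper below (hypothetical name) restates the accounting from eth_ark_tx_desc_fill():

	/* Slots consumed by one segment in the new format: a header slot,
	 * meta_cnt / 2 user-meta slots (user words 1,2 and 3,4), and a
	 * physaddr slot.  meta_cnt is the hook's word count, 0 to 5;
	 * word 0 travels in the header slot's user1 field.
	 */
	static inline int32_t
	ark_tx_slots_for_seg(uint8_t meta_cnt)
	{
		return 1 + (meta_cnt / 2) + 1;
	}

For the SOP segment this yields 2, 3, or 4 slots; non-SOP segments have meta_cnt forced to 0 and take exactly 2, matching the completion-side increment of meta->meta_cnt + 2 (meta->meta_cnt is stored as meta_cnt / 2 in the header slot).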
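
The ring indexes also switch from uint32_t to int32_t, and every occupancy test becomes a signed difference, (top_index - free_index) > 0 or (prod_index_limit - prod_index) > 0, instead of an equality check. Equality no longer works because an index can now jump by several slots per packet and may step over a precomputed limit; the signed subtraction also stays correct across 32-bit wraparound as long as producer and consumer remain within 2^31 slots of each other. A self-contained illustration with assumed values:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		/* consumer sits just before unsigned wraparound */
		int32_t free_index = (int32_t)0xfffffffe;
		/* producer is five slots ahead, numerically past zero */
		int32_t top_index = free_index + 5;

		/* an equality test could be stepped over when free_index
		 * advances by meta_cnt + 2; the signed difference still
		 * reports five pending slots
		 */
		assert(top_index - free_index == 5);
		assert((top_index - free_index) > 0);
		return 0;
	}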
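
The tx_user_meta_hook callback lets an Arkville user extension attach up to five 32-bit sideband words to each transmitted packet. The tx_user_meta_hook_fn typedef lives in the driver's user-extension headers and is not shown in this diff; the sketch below infers the parameter order from the call site, tx_user_meta_hook(mbuf, user_meta, &user_len, queue->ext_user_data), and the hook body is hypothetical:

	#include <rte_mbuf.h>

	/* Hypothetical extension hook: pass the packet's RSS hash to the
	 * hardware as user-meta word 0, which rides in the header slot's
	 * user1 field.  A hook may supply 0 to 5 words.
	 */
	static void
	example_tx_user_meta_hook(struct rte_mbuf *mbuf, uint32_t *meta,
				  uint8_t *meta_cnt, void *ext_user_data)
	{
		(void)ext_user_data;	/* per-port context, unused here */
		meta[0] = mbuf->hash.rss;
		*meta_cnt = 1;
	}

A hook registered through the user-extension structure (copied into the queue from ark->user_ext.tx_user_meta_hook above) runs once per packet before the descriptors are filled; setting *meta_cnt to 0 keeps the two-slot fast path.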