/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2018 Atomic Rules LLC
 */

#include <unistd.h>

#include "rte_pmd_ark.h"
#include "ark_ethdev_tx.h"
#include "ark_global.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_logs.h"

#define ARK_TX_META_SIZE	32
#define ARK_TX_META_OFFSET	(RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
#define ARK_TX_MAX_NOCHAIN	(RTE_MBUF_DEFAULT_DATAROOM)
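/*
 * ARK_TX_META_SIZE must match sizeof(struct ark_tx_meta); agreement
 * with the hardware is checked at queue-config time via ark_mpu_verify().
 */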
#ifndef RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#define ARK_MIN_TX_PKTLEN 0
#else
#define ARK_MIN_TX_PKTLEN RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#endif
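/*
 * When RTE_LIBRTE_ARK_MIN_TX_PKTLEN is configured to a non-zero value,
 * eth_ark_xmit_pkts() zero-pads any shorter frame up to this length
 * before posting it to hardware.
 */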
/* ************************************************************************* */
struct ark_tx_queue {
	struct ark_tx_meta *meta_q;
	struct rte_mbuf **bufs;

	/* handles for hw objects */
	struct ark_mpu_t *mpu;
	struct ark_ddm_t *ddm;

	/* Stats HW tracks bytes and packets, need to count send errors */
	uint64_t tx_errors;

	uint32_t queue_size;
	uint32_t queue_mask;

	/* 3 indexes to the paired data rings. */
	uint32_t prod_index;		/* where to put the next one */
	uint32_t free_index;		/* mbuf has been freed */

	/* The queue Id is used to identify the HW Q */
	uint16_t phys_qid;
	/* The queue Index within the dpdk device structures */
	uint16_t queue_index;

	uint32_t pad[1];

	/* second cache line - fields only used in slow path */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;
	uint32_t cons_index;		/* hw is done, can be freed */
} __rte_cache_aligned;
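/*
 * Index arithmetic: all three indexes increase monotonically and wrap
 * modulo 2^32; a ring slot is always addressed as (index & queue_mask),
 * which is why queue_size must be a power of two.  The ordering is
 * free_index <= cons_index <= prod_index.  Example: with queue_size 512,
 * prod_index - free_index == 512 means the ring is full.
 */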
/* Forward declarations */
static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
				 struct rte_mbuf *mbuf);
static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
static void free_completed_tx(struct ark_tx_queue *queue);
static void
ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
{
	ark_mpu_stop(queue->mpu);
}
/* ************************************************************************* */
static inline void
eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
			  const struct rte_mbuf *mbuf,
			  uint8_t flags)
{
	meta->physaddr = rte_mbuf_data_iova(mbuf);
	meta->user1 = rte_pmd_ark_mbuf_tx_userdata_get(mbuf);
	meta->data_len = rte_pktmbuf_data_len(mbuf);
	meta->flags = flags;
}

/* ************************************************************************* */
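/*
 * No-op burst function: accepts nothing, so the caller retains
 * ownership of every mbuf.  Presumably installed as the burst handler
 * while the queue cannot transmit (e.g. device stopped).
 */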
uint16_t
eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
		       struct rte_mbuf **tx_pkts __rte_unused,
		       uint16_t nb_pkts __rte_unused)
{
	return 0;
}
/* ************************************************************************* */
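/*
 * TX burst entry point: reclaim completed descriptors, then post each
 * mbuf -- runts are zero-padded to ARK_MIN_TX_PKTLEN, single-segment
 * packets take the fast path, and chained mbufs go through
 * eth_ark_tx_jumbo().  The producer index is published to the FPGA
 * once at the end of the burst.
 */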
uint16_t
eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ark_tx_queue *queue;
	struct rte_mbuf *mbuf;
	struct ark_tx_meta *meta;

	uint32_t idx;
	uint32_t prod_index_limit;
	int stat;
	uint16_t nb;
	const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;

	queue = (struct ark_tx_queue *)vtxq;

	/* free any packets after the HW is done with them */
	free_completed_tx(queue);

	/* stop when prod_index - free_index would exceed queue_size */
	prod_index_limit = queue->queue_size + queue->free_index;

	for (nb = 0;
	     (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
	     ++nb) {
		mbuf = tx_pkts[nb];

		if (min_pkt_len &&
		    unlikely(rte_pktmbuf_pkt_len(mbuf) < min_pkt_len)) {
			/* this packet even if it is small can be split,
			 * be sure to add to the end mbuf
			 */
			uint16_t to_add = min_pkt_len -
				rte_pktmbuf_pkt_len(mbuf);
			char *appended =
				rte_pktmbuf_append(mbuf, to_add);

			if (appended == 0) {
				/* This packet is in error,
				 * we cannot send it so just
				 * count it and delete it.
				 */
				queue->tx_errors += 1;
				rte_pktmbuf_free(mbuf);
				continue;
			}
			memset(appended, 0, to_add);
		}

		if (unlikely(mbuf->nb_segs != 1)) {
			stat = eth_ark_tx_jumbo(queue, mbuf);
			if (unlikely(stat != 0))
				break;		/* Queue is full */
		} else {
			idx = queue->prod_index & queue->queue_mask;
			queue->bufs[idx] = mbuf;
			meta = &queue->meta_q[idx];
			eth_ark_tx_meta_from_mbuf(meta,
						  mbuf,
						  ARK_DDM_SOP |
						  ARK_DDM_EOP);
			queue->prod_index++;
		}
	}

	if (ARK_DEBUG_CORE && nb != nb_pkts) {
		ARK_PMD_LOG(DEBUG, "TX: Failure to send:"
			    " req: %" PRIU32
			    " sent: %" PRIU32
			    " prod: %" PRIU32
			    " cons: %" PRIU32
			    " free: %" PRIU32 "\n",
			    nb_pkts, nb,
			    queue->prod_index,
			    queue->cons_index,
			    queue->free_index);
		ark_mpu_dump(queue->mpu,
			     "TX Failure MPU: ",
			     queue->phys_qid);
	}

	/* let FPGA know producer index. */
	if (likely(nb != 0))
		ark_mpu_set_producer(queue->mpu, queue->prod_index);

	return nb;
}
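/*
 * Note the single producer-index (doorbell) write per burst above,
 * rather than one per packet.  Illustrative caller pattern -- standard
 * ethdev usage, not part of this PMD; packets not accepted remain
 * owned by the application:
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *	for (uint16_t i = sent; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);	// or retry later
 */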
/* ************************************************************************* */
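/*
 * Multi-segment (chained mbuf) transmit: each segment gets its own
 * descriptor, with ARK_DDM_SOP on the first and ARK_DDM_EOP on the
 * last.  Returns non-zero, consuming nothing, if the ring lacks room
 * for the entire chain.
 */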
static uint32_t
eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
{
	struct rte_mbuf *next;
	struct ark_tx_meta *meta;
	uint32_t free_queue_space;
	uint32_t idx;
	uint8_t flags = ARK_DDM_SOP;

	free_queue_space = queue->queue_mask -
		(queue->prod_index - queue->free_index);
	if (unlikely(free_queue_space < mbuf->nb_segs))
		return -1;

	while (mbuf != NULL) {
		next = mbuf->next;

		idx = queue->prod_index & queue->queue_mask;
		queue->bufs[idx] = mbuf;
		meta = &queue->meta_q[idx];

		flags |= (next == NULL) ? ARK_DDM_EOP : 0;
		eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
		queue->prod_index++;

		flags &= ~ARK_DDM_SOP;	/* drop SOP flag after the first segment */
		mbuf = next;
	}

	return 0;
}
/* ************************************************************************* */
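/*
 * ethdev tx_queue_setup hook: allocates the queue structure and both
 * rings on the caller's NUMA socket, locates this queue's DDM and MPU
 * register windows, and configures the hardware.
 */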
int
eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_tx_queue *queue;
	int status;

	int qidx = queue_idx;

	if (!rte_is_power_of_2(nb_desc)) {
		ARK_PMD_LOG(ERR,
			    "DPDK Arkville configuration queue size"
			    " must be a power of two %u (%s)\n",
			    nb_desc, __func__);
		return -1;
	}

	/* Allocate queue struct */
	queue = rte_zmalloc_socket("Ark_txqueue",
				   sizeof(struct ark_tx_queue),
				   64,
				   socket_id);
	if (queue == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate tx "
			    "queue memory in %s\n",
			    __func__);
		return -ENOMEM;
	}

	/* we use zmalloc, no need to initialize fields */
	queue->queue_size = nb_desc;
	queue->queue_mask = nb_desc - 1;
	queue->phys_qid = qidx;
	queue->queue_index = queue_idx;
	dev->data->tx_queues[queue_idx] = queue;
	queue->meta_q =
		rte_zmalloc_socket("Ark_txqueue meta",
				   nb_desc * sizeof(struct ark_tx_meta),
				   64,
				   socket_id);
	queue->bufs =
		rte_zmalloc_socket("Ark_txqueue bufs",
				   nb_desc * sizeof(struct rte_mbuf *),
				   64,
				   socket_id);

	if (queue->meta_q == 0 || queue->bufs == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate "
			    "queue memory in %s\n", __func__);
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -ENOMEM;
	}

	queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
	queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);

	status = eth_ark_tx_hw_queue_config(queue);

	if (unlikely(status != 0)) {
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -1;		/* ERROR CODE */
	}

	return 0;
}
/* ************************************************************************* */
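/*
 * Per-queue hardware setup: the MPU is pointed at the descriptor ring,
 * and the DDM is given the IOVA of queue->cons_index so the hardware
 * DMA-writes the consumer index directly into host memory --
 * free_completed_tx() then polls that field without any register read.
 */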
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
	rte_iova_t queue_base, ring_base, cons_index_addr;
	uint32_t write_interval_ns;

	/* Verify HW -- MPU */
	if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
		return -1;

	queue_base = rte_malloc_virt2iova(queue);
	ring_base = rte_malloc_virt2iova(queue->meta_q);
	cons_index_addr =
		queue_base + offsetof(struct ark_tx_queue, cons_index);

	ark_mpu_stop(queue->mpu);
	ark_mpu_reset(queue->mpu);

	/* Stop, reset, and configure the MPU */
	ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);

	/*
	 * Adjust the write interval based on queue size --
	 * increase PCIe traffic when the mbuf count is low.
	 * Queue sizes less than 128 are not allowed.
	 */
	switch (queue->queue_size) {
	case 128:
		write_interval_ns = 500;
		break;
	case 256:
		write_interval_ns = 500;
		break;
	case 512:
		write_interval_ns = 1000;
		break;
	default:
		write_interval_ns = 2000;
		break;
	}

	/* Completion address in DDM */
	ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);

	return 0;
}
/* ************************************************************************* */
void
eth_ark_tx_queue_release(void *vtx_queue)
{
	struct ark_tx_queue *queue;

	queue = (struct ark_tx_queue *)vtx_queue;

	ark_tx_hw_queue_stop(queue);

	queue->cons_index = queue->prod_index;
	free_completed_tx(queue);

	rte_free(queue->meta_q);
	rte_free(queue->bufs);
	rte_free(queue);
}
/* ************************************************************************* */
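/*
 * ethdev tx_queue_stop hook: waits (with a bounded poll) for the DDM
 * to drain all posted packets before stopping the MPU and reclaiming
 * the completed mbufs.
 */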
int
eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;
	int cnt = 0;

	queue = dev->data->tx_queues[queue_id];

	/* Wait for DDM to send out all packets. */
	while (queue->cons_index != queue->prod_index) {
		usleep(100);
		if (cnt++ > 10000)
			return -1;
	}

	ark_mpu_stop(queue->mpu);
	free_completed_tx(queue);

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

int
eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;

	queue = dev->data->tx_queues[queue_id];
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	ark_mpu_start(queue->mpu);
	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
/* ************************************************************************* */
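/*
 * Reclaim mbufs for descriptors the hardware has finished with, i.e.
 * those between free_index and the DMA-updated cons_index.  Only the
 * descriptor carrying ARK_DDM_SOP triggers a free: rte_pktmbuf_free()
 * on the head segment releases an entire chain, so EOP-only segments
 * are skipped.
 */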
static void
free_completed_tx(struct ark_tx_queue *queue)
{
	struct rte_mbuf *mbuf;
	struct ark_tx_meta *meta;
	uint32_t top_index;

	top_index = queue->cons_index;	/* read once */
	while (queue->free_index != top_index) {
		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
		mbuf = queue->bufs[queue->free_index & queue->queue_mask];

		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
			/* ref count of the mbuf is checked in this call. */
			rte_pktmbuf_free(mbuf);
		}
		queue->free_index++;
	}
}
/* ************************************************************************* */
void
eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;
	uint64_t bytes, pkts;

	queue = (struct ark_tx_queue *)vqueue;
	ddm = queue->ddm;

	bytes = ark_ddm_queue_byte_count(ddm);
	pkts = ark_ddm_queue_pkt_count(ddm);

	stats->q_opackets[queue->queue_index] = pkts;
	stats->q_obytes[queue->queue_index] = bytes;
	stats->opackets += pkts;
	stats->obytes += bytes;
	stats->oerrors += queue->tx_errors;
}

void
eth_tx_queue_stats_reset(void *vqueue)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;

	queue = (struct ark_tx_queue *)vqueue;
	ddm = queue->ddm;

	ark_ddm_queue_reset_stats(ddm);
	queue->tx_errors = 0;
}