/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2021 Atomic Rules LLC
 */

#include <unistd.h>

#include "rte_pmd_ark.h"
#include "ark_ethdev_tx.h"
#include "ark_global.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_logs.h"

#define ARK_TX_META_SIZE   32
#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)

#ifndef RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#define ARK_MIN_TX_PKTLEN 0
#else
#define ARK_MIN_TX_PKTLEN RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#endif
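
/* With the default ARK_MIN_TX_PKTLEN of 0, min_pkt_len in eth_ark_xmit_pkts()
 * is a compile-time zero and the short-packet padding path drops out of the
 * generated code entirely.
 */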

/* ************************************************************************* */
struct ark_tx_queue {
	union ark_tx_meta *meta_q;
	struct rte_mbuf **bufs;

	/* handles for hw objects */
	struct ark_mpu_t *mpu;
	struct ark_ddm_t *ddm;

	/* Stats HW tracks bytes and packets, need to count send errors */
	uint64_t tx_errors;

	uint32_t queue_size;
	uint32_t queue_mask;

	/* 3 indexes to the paired data rings. */
	int32_t prod_index;	/* where to put the next one */
	int32_t free_index;	/* mbuf has been freed */

	/* The queue Id is used to identify the HW Q */
	uint16_t phys_qid;
	/* The queue Index within the dpdk device structures */
	uint16_t queue_index;

	uint32_t pad[1];

	/* second cache line - fields written by device */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;
	volatile int32_t cons_index;	/* hw is done, can be freed */
} __rte_cache_aligned;
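
/* The three ring indexes are free-running 32-bit counters reduced to ring
 * offsets with queue_mask; signed subtraction (e.g. cons_index - free_index)
 * remains correct across wraparound.  cons_index sits alone on the second
 * cache line because the DDM DMA-writes it (see eth_ark_tx_hw_queue_config),
 * keeping device writes from dirtying the producer-side fields.
 */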

/* Forward declarations */
static int eth_ark_tx_jumbo(struct ark_tx_queue *queue,
			    struct rte_mbuf *mbuf,
			    uint32_t *user_meta, uint8_t meta_cnt);
static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
static void free_completed_tx(struct ark_tx_queue *queue);

static inline void
ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
{
	ark_mpu_stop(queue->mpu);
}

/* ************************************************************************* */
static inline void
eth_ark_tx_desc_fill(struct ark_tx_queue *queue,
		     struct rte_mbuf *mbuf,
		     uint8_t flags,
		     uint32_t *user_meta,
		     uint8_t meta_cnt /* 0 to 5 */
		     )
{
	uint32_t tx_idx;
	union ark_tx_meta *meta;
	uint8_t m;

	/* Header */
	tx_idx = queue->prod_index & queue->queue_mask;
	meta = &queue->meta_q[tx_idx];
	meta->data_len = rte_pktmbuf_data_len(mbuf);
	meta->flags = flags;
	meta->meta_cnt = meta_cnt / 2;
	meta->user1 = meta_cnt ? (*user_meta++) : 0;
	queue->prod_index++;

	queue->bufs[tx_idx] = mbuf;

	/* 1 or 2 user meta data entries, user words 1,2 and 3,4 */
	for (m = 1; m < meta_cnt; m += 2) {
		tx_idx = queue->prod_index & queue->queue_mask;
		meta = &queue->meta_q[tx_idx];
		meta->usermeta0 = *user_meta++;
		meta->usermeta1 = *user_meta++;
		queue->prod_index++;
	}

	tx_idx = queue->prod_index & queue->queue_mask;
	meta = &queue->meta_q[tx_idx];
	meta->physaddr = rte_mbuf_data_iova(mbuf);
	queue->prod_index++;
}
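
/* A fill therefore consumes 2 ring slots (header + physaddr) plus one slot
 * per pair of user meta words, i.e. meta_cnt / 2 extra slots; at the maximum
 * meta_cnt of 5 that is 4 slots total.  free_completed_tx() depends on this
 * layout when it strides by meta->meta_cnt + 2.
 */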

/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
		       struct rte_mbuf **tx_pkts __rte_unused,
		       uint16_t nb_pkts __rte_unused)
{
	return 0;
}

/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ark_tx_queue *queue;
	struct rte_mbuf *mbuf;
	uint32_t user_meta;

	int stat;
	int32_t prod_index_limit;
	uint16_t nb;
	uint8_t user_len = 1;
	const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;

	queue = (struct ark_tx_queue *)vtxq;

	/* free any packets after the HW is done with them */
	free_completed_tx(queue);

	/* leave 4 elements mpu data */
	prod_index_limit = queue->queue_size + queue->free_index - 4;
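
	/* The 4-slot reserve equals the largest single descriptor fill
	 * (header + two user-meta slots + physaddr), so a fill that starts
	 * below prod_index_limit cannot overrun the free pointer on the
	 * single-segment path; eth_ark_tx_jumbo() re-checks space itself
	 * for chained mbufs.
	 */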

	for (nb = 0;
	     (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0;
	     ++nb) {
		mbuf = tx_pkts[nb];

		if (min_pkt_len &&
		    unlikely(rte_pktmbuf_pkt_len(mbuf) < min_pkt_len)) {
			/* Even a small packet may be chained; be sure to
			 * append to the last mbuf in the chain.
			 */
			uint16_t to_add = min_pkt_len -
				rte_pktmbuf_pkt_len(mbuf);
			char *appended =
				rte_pktmbuf_append(mbuf, to_add);

			if (appended == 0) {
				/* This packet is in error,
				 * we cannot send it so just
				 * count it and delete it.
				 */
				queue->tx_errors += 1;
				rte_pktmbuf_free(mbuf);
				continue;
			}
			memset(appended, 0, to_add);
		}

		user_meta = rte_pmd_ark_mbuf_tx_userdata_get(mbuf);
		if (unlikely(mbuf->nb_segs != 1)) {
			stat = eth_ark_tx_jumbo(queue, mbuf,
						&user_meta, user_len);
			if (unlikely(stat != 0))
				break;	/* Queue is full */
		} else {
			eth_ark_tx_desc_fill(queue, mbuf,
					     ARK_DDM_SOP | ARK_DDM_EOP,
					     &user_meta, user_len);
		}
	}

	if (ARK_DEBUG_CORE && nb != nb_pkts) {
		ARK_PMD_LOG(DEBUG, "TX: Failure to send:"
			    " req: %" PRIU32 " sent: %" PRIU32
			    " prod: %" PRIU32 " cons: %" PRIU32
			    " free: %" PRIU32 "\n",
			    nb_pkts, nb, queue->prod_index,
			    queue->cons_index, queue->free_index);
		ark_mpu_dump(queue->mpu,
			     "TX Failure MPU: ",
			     queue->phys_qid);
	}
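
	/* The producer index is published to hardware once per burst rather
	 * than per packet, avoiding a PCIe register write on the per-packet
	 * fast path.
	 */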
	/* let FPGA know producer index. */
	if (likely(nb != 0))
		ark_mpu_set_producer(queue->mpu, queue->prod_index);

	return nb;
}

/* ************************************************************************* */
static int
eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf,
		 uint32_t *user_meta, uint8_t meta_cnt)
{
	struct rte_mbuf *next;
	int32_t free_queue_space;
	uint8_t flags = ARK_DDM_SOP;

	free_queue_space = queue->queue_mask -
		(queue->prod_index - queue->free_index);
	/* Up to 4 ring slots for the first segment, 2 for each later one */
	if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs))))
		return -1;

	while (mbuf != NULL) {
		next = mbuf->next;
		flags |= (next == NULL) ? ARK_DDM_EOP : 0;

		eth_ark_tx_desc_fill(queue, mbuf, flags, user_meta, meta_cnt);

		flags &= ~ARK_DDM_SOP;	/* drop SOP flags */
		meta_cnt = 0;		/* Meta only on SOP */
		mbuf = next;
	}

	return 0;
}
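
/* The space check in eth_ark_tx_jumbo() is deliberately conservative: only
 * the SOP segment can carry user meta words, so the true worst case is two
 * slots per segment plus up to two meta slots on the first; 2 + 2 * nb_segs
 * always covers that, at the cost of occasionally refusing a burst early.
 */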

/* ************************************************************************* */
int
eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_tx_queue *queue;
	int status;

	int qidx = queue_idx;

	if (!rte_is_power_of_2(nb_desc)) {
		ARK_PMD_LOG(ERR,
			    "DPDK Arkville configuration queue size"
			    " must be power of two %u (%s)\n",
			    nb_desc, __func__);
		return -1;
	}

	/* Each packet requires at least 2 mpu elements - double desc count */
	nb_desc = 2 * nb_desc;
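
	/* After doubling, queue_size counts MPU ring slots rather than
	 * packets: each packet consumes two slots minimum, and more when
	 * user meta words or chained segments are present.
	 */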

	/* Allocate queue struct */
	queue = rte_zmalloc_socket("Ark_txqueue",
				   sizeof(struct ark_tx_queue),
				   64,
				   socket_id);
	if (queue == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate tx "
			    "queue memory in %s\n",
			    __func__);
		return -ENOMEM;
	}

	/* we use zmalloc no need to initialize fields */
	queue->queue_size = nb_desc;
	queue->queue_mask = nb_desc - 1;
	queue->phys_qid = qidx;
	queue->queue_index = queue_idx;
	dev->data->tx_queues[queue_idx] = queue;

	queue->meta_q =
		rte_zmalloc_socket("Ark_txqueue meta",
				   nb_desc * sizeof(union ark_tx_meta),
				   64,
				   socket_id);
	queue->bufs =
		rte_zmalloc_socket("Ark_txqueue bufs",
				   nb_desc * sizeof(struct rte_mbuf *),
				   64,
				   socket_id);

	if (queue->meta_q == 0 || queue->bufs == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate "
			    "queue memory in %s\n", __func__);
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -ENOMEM;
	}

	queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
	queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);

	status = eth_ark_tx_hw_queue_config(queue);

	if (unlikely(status != 0)) {
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -1;	/* ERROR CODE */
	}

	return 0;
}

/* ************************************************************************* */
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
	rte_iova_t queue_base, ring_base, cons_index_addr;
	uint32_t write_interval_ns;

	/* Verify HW -- MPU */
	if (ark_mpu_verify(queue->mpu, sizeof(union ark_tx_meta)))
		return -1;

	queue_base = rte_malloc_virt2iova(queue);
	ring_base = rte_malloc_virt2iova(queue->meta_q);
	cons_index_addr =
		queue_base + offsetof(struct ark_tx_queue, cons_index);

	ark_mpu_stop(queue->mpu);
	ark_mpu_reset(queue->mpu);

	/* Stop, reset and configure the MPU */
	ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);

	/*
	 * Adjust the write interval based on queue size --
	 * increase PCIe traffic when the mbuf count is low.
	 * Queue sizes less than 128 are not allowed.
	 */
	switch (queue->queue_size) {
	case 128:
		write_interval_ns = 500;
		break;
	case 256:
		write_interval_ns = 500;
		break;
	case 512:
		write_interval_ns = 1000;
		break;
	default:
		write_interval_ns = 2000;
		break;
	}

	/* Completion (consumer index) writeback address for the DDM */
	ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);

	return 0;
}
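
/* The DDM DMA-writes the TX consumer index directly into the queue struct
 * at cons_index_addr; write_interval_ns sets how often that completion
 * write is refreshed, trading completion latency against PCIe traffic.
 */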

/* ************************************************************************* */
void
eth_ark_tx_queue_release(void *vtx_queue)
{
	struct ark_tx_queue *queue;

	queue = (struct ark_tx_queue *)vtx_queue;

	ark_tx_hw_queue_stop(queue);

	queue->cons_index = queue->prod_index;
	free_completed_tx(queue);

	rte_free(queue->meta_q);
	rte_free(queue->bufs);
	rte_free(queue);
}

/* ************************************************************************* */
int
eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;
	int cnt = 0;

	queue = dev->data->tx_queues[queue_id];

	/* Wait for DDM to send out all packets. */
	while (queue->cons_index != queue->prod_index) {
		usleep(100);
		if (cnt++ > 10000)
			return -1;
	}

	ark_mpu_stop(queue->mpu);
	free_completed_tx(queue);

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
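
/* The drain loop above bounds the wait at roughly one second (10000 polls
 * of 100 us each) so a wedged device cannot hang the control path.
 */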

int
eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;

	queue = dev->data->tx_queues[queue_id];
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	ark_mpu_start(queue->mpu);
	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/* ************************************************************************* */
static void
free_completed_tx(struct ark_tx_queue *queue)
{
	struct rte_mbuf *mbuf;
	union ark_tx_meta *meta;
	int32_t top_index;

	top_index = queue->cons_index;	/* read once */
	while ((top_index - queue->free_index) > 0) {
		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
			mbuf = queue->bufs[queue->free_index &
					   queue->queue_mask];
			/* ref count of the mbuf is checked in this call. */
			rte_pktmbuf_free(mbuf);
		}
		queue->free_index += (meta->meta_cnt + 2);
	}
}
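
/* Only SOP slots have an mbuf recorded in bufs[], so each packet's mbuf
 * chain is freed exactly once, at its header slot, and the walk then
 * strides past the user-meta and physaddr slots via meta_cnt + 2.  Reading
 * the volatile cons_index once keeps the loop bound stable while hardware
 * continues to advance it.
 */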

/* ************************************************************************* */
void
eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;
	uint64_t bytes, pkts;

	queue = vqueue;
	ddm = queue->ddm;

	bytes = ark_ddm_queue_byte_count(ddm);
	pkts = ark_ddm_queue_pkt_count(ddm);

	stats->q_opackets[queue->queue_index] = pkts;
	stats->q_obytes[queue->queue_index] = bytes;
	stats->opackets += pkts;
	stats->obytes += bytes;
	stats->oerrors += queue->tx_errors;
}
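
/* As noted in struct ark_tx_queue, hardware maintains the byte and packet
 * counts in the DDM; software contributes only tx_errors.
 */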

void
eth_tx_queue_stats_reset(void *vqueue)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;

	queue = vqueue;
	ddm = queue->ddm;

	ark_ddm_queue_reset_stats(ddm);
	queue->tx_errors = 0;
}