/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2021 Atomic Rules LLC
 */

#include <unistd.h>

#include "ark_ethdev_tx.h"
#include "ark_global.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_logs.h"

#define ARK_TX_META_SIZE   32
#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)

#ifndef RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#define ARK_MIN_TX_PKTLEN 0
#else
#define ARK_MIN_TX_PKTLEN RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#endif

/* ************************************************************************* */
struct ark_tx_queue {
	union ark_tx_meta *meta_q;
	struct rte_mbuf **bufs;

	/* handles for hw objects */
	struct ark_mpu_t *mpu;
	struct ark_ddm_t *ddm;

	/* Stats HW tracks bytes and packets, need to count send errors */
	uint64_t tx_errors;

	tx_user_meta_hook_fn tx_user_meta_hook;
	void *ext_user_data;

	uint32_t queue_size;
	uint32_t queue_mask;

	/* 3 indexes to the paired data rings. */
	int32_t prod_index;		/* where to put the next one */
	int32_t free_index;		/* mbuf has been freed */

	/* The queue Id is used to identify the HW Q */
	uint16_t phys_qid;
	/* The queue Index within the dpdk device structures */
	uint16_t queue_index;

	/* next cache line - fields written by device */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;
	volatile int32_t cons_index;	/* hw is done, can be freed */
} __rte_cache_aligned;
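
/*
 * Note on the ring indexes above: prod_index, free_index and cons_index
 * are free-running 32-bit counters; they are masked with queue_mask only
 * when the rings are actually indexed.  Occupancy tests therefore use
 * signed differences, e.g. (prod_index - free_index), which stay correct
 * across 32-bit wraparound as long as queue_size is a power of two well
 * below 2^31; see eth_ark_xmit_pkts() and free_completed_tx().
 */
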
/* Forward declarations */
static int eth_ark_tx_jumbo(struct ark_tx_queue *queue,
			    struct rte_mbuf *mbuf,
			    uint32_t *user_meta, uint8_t meta_cnt);
static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
static void free_completed_tx(struct ark_tx_queue *queue);

static inline void
ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
{
	ark_mpu_stop(queue->mpu);
}

/* ************************************************************************* */
static inline void
eth_ark_tx_desc_fill(struct ark_tx_queue *queue,
		     struct rte_mbuf *mbuf,
		     uint8_t flags,
		     uint32_t *user_meta,
		     uint8_t meta_cnt /* 0 to 5 */
		     )
{
	uint32_t tx_idx;
	union ark_tx_meta *meta;
	uint8_t m;

	/* Header */
	tx_idx = queue->prod_index & queue->queue_mask;
	meta = &queue->meta_q[tx_idx];
	meta->data_len = rte_pktmbuf_data_len(mbuf);
	meta->flags = flags;
	meta->meta_cnt = meta_cnt / 2;
	meta->user1 = meta_cnt ? (*user_meta++) : 0;
	queue->prod_index++;

	queue->bufs[tx_idx] = mbuf;

	/* 1 or 2 user meta data entries, user words 1,2 and 3,4 */
	for (m = 1; m < meta_cnt; m += 2) {
		tx_idx = queue->prod_index & queue->queue_mask;
		meta = &queue->meta_q[tx_idx];
		meta->usermeta0 = *user_meta++;
		meta->usermeta1 = *user_meta++;
		queue->prod_index++;
	}

	tx_idx = queue->prod_index & queue->queue_mask;
	meta = &queue->meta_q[tx_idx];
	meta->physaddr = rte_mbuf_data_iova(mbuf);
	queue->prod_index++;
}
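
/*
 * Descriptor accounting: each call above consumes (meta_cnt / 2) + 2 ring
 * elements -- one header element, zero to two user-meta elements, and one
 * physical-address element.  free_completed_tx() advances free_index by
 * meta->meta_cnt + 2, which matches because meta->meta_cnt stores
 * meta_cnt / 2.
 */
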
/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
		       struct rte_mbuf **tx_pkts __rte_unused,
		       uint16_t nb_pkts __rte_unused)
{
	return 0;
}

/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ark_tx_queue *queue;
	struct rte_mbuf *mbuf;
	uint32_t user_meta[5];

	int stat;
	int32_t prod_index_limit;
	uint16_t nb;
	uint8_t user_len = 0;
	const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;
	tx_user_meta_hook_fn tx_user_meta_hook;

	queue = (struct ark_tx_queue *)vtxq;
	tx_user_meta_hook = queue->tx_user_meta_hook;

	/* free any packets after the HW is done with them */
	free_completed_tx(queue);

	/* leave 4 elements mpu data */
	prod_index_limit = queue->queue_size + queue->free_index - 4;

	for (nb = 0;
	     (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0;
	     ++nb) {
		mbuf = tx_pkts[nb];

		if (min_pkt_len &&
		    unlikely(rte_pktmbuf_pkt_len(mbuf) < min_pkt_len)) {
			/* this packet, even if it is small, can be split;
			 * be sure to append to the last mbuf in the chain
			 */
			uint16_t to_add = min_pkt_len -
				rte_pktmbuf_pkt_len(mbuf);
			char *appended =
				rte_pktmbuf_append(mbuf, to_add);

			if (appended == 0) {
				/* This packet is in error;
				 * we cannot send it, so just
				 * count it and delete it.
				 */
				queue->tx_errors += 1;
				rte_pktmbuf_free(mbuf);
				continue;
			}
			memset(appended, 0, to_add);
		}

		if (tx_user_meta_hook)
			tx_user_meta_hook(mbuf, user_meta, &user_len,
					  queue->ext_user_data);
		if (unlikely(mbuf->nb_segs != 1)) {
			stat = eth_ark_tx_jumbo(queue, mbuf,
						user_meta, user_len);
			if (unlikely(stat != 0))
				break;	/* Queue is full */
		} else {
			eth_ark_tx_desc_fill(queue, mbuf,
					     ARK_DDM_SOP | ARK_DDM_EOP,
					     user_meta, user_len);
		}
	}

	if (ARK_DEBUG_CORE && nb != nb_pkts) {
		ARK_PMD_LOG(DEBUG, "TX: Failure to send:"
			    " req: %" PRIU32
			    " sent: %" PRIU32
			    " prod: %" PRIU32
			    " cons: %" PRIU32
			    " free: %" PRIU32 "\n",
			    nb_pkts, nb,
			    queue->prod_index,
			    queue->cons_index,
			    queue->free_index);
		ark_mpu_dump(queue->mpu,
			     "TX Failure MPU: ",
			     queue->phys_qid);
	}

	/* let FPGA know producer index. */
	if (likely(nb != 0))
		ark_mpu_set_producer(queue->mpu, queue->prod_index);

	return nb;
}
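
/*
 * The burst loop above stops 4 elements short of a completely full ring
 * (prod_index_limit).  The source comment only says "leave 4 elements mpu
 * data"; presumably the headroom keeps the hardware's producer/consumer
 * view unambiguous when the ring is nearly full.  Note also that
 * ark_mpu_set_producer() is the only device write in the hot path.
 */
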
/* ************************************************************************* */
static int
eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf,
		 uint32_t *user_meta, uint8_t meta_cnt)
{
	struct rte_mbuf *next;
	int32_t free_queue_space;
	uint8_t flags = ARK_DDM_SOP;

	free_queue_space = queue->queue_mask -
		(queue->prod_index - queue->free_index);
	/* Need up to 4 ring elements for the first segment, 2 per subsequent one */
	if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs))))
		return -1;

	while (mbuf != NULL) {
		next = mbuf->next;
		flags |= (next == NULL) ? ARK_DDM_EOP : 0;

		eth_ark_tx_desc_fill(queue, mbuf, flags, user_meta, meta_cnt);

		flags &= ~ARK_DDM_SOP;	/* drop SOP flag */
		meta_cnt = 0;		/* Meta only on SOP */
		mbuf = next;
	}

	return 0;
}
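
/*
 * Chained-mbuf handling: each segment gets its own header and
 * physical-address elements, hence the 2 + 2 * nb_segs space check above,
 * which leaves room for up to two user-meta elements on the first segment.
 * ARK_DDM_SOP is carried only by the first segment and ARK_DDM_EOP only by
 * the last.
 */
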
/* ************************************************************************* */
int
eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_tx_queue *queue;
	int status;

	int qidx = queue_idx;

	if (!rte_is_power_of_2(nb_desc)) {
		ARK_PMD_LOG(ERR,
			    "DPDK Arkville configuration queue size"
			    " must be a power of two %u (%s)\n",
			    nb_desc, __func__);
		return -1;
	}

	/* Each packet requires at least 2 mpu elements - double desc count */
	nb_desc = 2 * nb_desc;

	/* Allocate queue struct */
	queue = rte_zmalloc_socket("Ark_txqueue",
				   sizeof(struct ark_tx_queue),
				   64,
				   socket_id);
	if (queue == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate tx "
			    "queue memory in %s\n",
			    __func__);
		return -ENOMEM;
	}

	/* we use zmalloc, no need to initialize fields */
	queue->queue_size = nb_desc;
	queue->queue_mask = nb_desc - 1;
	queue->phys_qid = qidx;
	queue->queue_index = queue_idx;
	dev->data->tx_queues[queue_idx] = queue;
	queue->tx_user_meta_hook = ark->user_ext.tx_user_meta_hook;
	queue->ext_user_data = ark->user_data[dev->data->port_id];

	queue->meta_q =
		rte_zmalloc_socket("Ark_txqueue meta",
				   nb_desc * sizeof(union ark_tx_meta),
				   64,
				   socket_id);
	queue->bufs =
		rte_zmalloc_socket("Ark_txqueue bufs",
				   nb_desc * sizeof(struct rte_mbuf *),
				   64,
				   socket_id);

	if (queue->meta_q == 0 || queue->bufs == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate "
			    "queue memory in %s\n", __func__);
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -ENOMEM;
	}

	queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
	queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);

	status = eth_ark_tx_hw_queue_config(queue);

	if (unlikely(status != 0)) {
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -1;		/* ERROR CODE */
	}

	return 0;
}
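
/*
 * Sizing note: nb_desc is doubled above because even a meta-less packet
 * consumes two ring elements (header plus physical address), so the ring
 * can hold at least the requested number of packets.
 */
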
/* ************************************************************************* */
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
	rte_iova_t queue_base, ring_base, cons_index_addr;
	uint32_t write_interval_ns;

	/* Verify HW -- MPU */
	if (ark_mpu_verify(queue->mpu, sizeof(union ark_tx_meta)))
		return -1;

	queue_base = rte_malloc_virt2iova(queue);
	ring_base = rte_malloc_virt2iova(queue->meta_q);
	cons_index_addr =
		queue_base + offsetof(struct ark_tx_queue, cons_index);

	ark_mpu_stop(queue->mpu);
	ark_mpu_reset(queue->mpu);

	/* Stop, reset and configure MPU */
	ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);

	/*
	 * Adjust the write interval based on queue size --
	 * increase pcie traffic when low mbuf count
	 * Queue sizes less than 128 are not allowed
	 */
	switch (queue->queue_size) {
	case 128:
		write_interval_ns = 500;
		break;
	case 256:
		write_interval_ns = 500;
		break;
	case 512:
		write_interval_ns = 1000;
		break;
	default:
		write_interval_ns = 2000;
		break;
	}

	/* Completion address in UDM */
	ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);

	return 0;
}
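
/*
 * The cons_index_addr handed to ark_ddm_setup() is the IOVA of this
 * queue's cons_index field, so the device DMA-writes completions directly
 * into the queue struct -- which is why cons_index is volatile and sits on
 * its own cache line.  The write interval trades completion latency
 * against PCIe write traffic.
 */
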
/* ************************************************************************* */
void
eth_ark_tx_queue_release(void *vtx_queue)
{
	struct ark_tx_queue *queue;

	queue = (struct ark_tx_queue *)vtx_queue;

	ark_tx_hw_queue_stop(queue);

	queue->cons_index = queue->prod_index;
	free_completed_tx(queue);

	rte_free(queue->meta_q);
	rte_free(queue->bufs);
	rte_free(queue);
}

/* ************************************************************************* */
int
eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;
	int cnt = 0;

	queue = dev->data->tx_queues[queue_id];

	/* Wait for DDM to send out all packets. */
	while (queue->cons_index != queue->prod_index) {
		usleep(100);
		if (cnt++ > 10000)
			return -1;
	}

	ark_mpu_stop(queue->mpu);
	free_completed_tx(queue);

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

int
eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;

	queue = dev->data->tx_queues[queue_id];
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	ark_mpu_start(queue->mpu);
	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/* ************************************************************************* */
static void
free_completed_tx(struct ark_tx_queue *queue)
{
	struct rte_mbuf *mbuf;
	union ark_tx_meta *meta;
	int32_t top_index;

	top_index = queue->cons_index;	/* read once */
	while ((top_index - queue->free_index) > 0) {
		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
			mbuf = queue->bufs[queue->free_index &
					   queue->queue_mask];
			/* ref count of the mbuf is checked in this call. */
			rte_pktmbuf_free(mbuf);
		}
		queue->free_index += (meta->meta_cnt + 2);
	}
}
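
/*
 * Only SOP elements have their mbuf freed above: for a chained packet,
 * rte_pktmbuf_free() on the first segment releases the whole chain, so
 * the per-segment entries in bufs[] must not be freed individually.
 */
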
/* ************************************************************************* */
void
eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;
	uint64_t bytes, pkts;

	queue = vqueue;
	ddm = queue->ddm;

	bytes = ark_ddm_queue_byte_count(ddm);
	pkts = ark_ddm_queue_pkt_count(ddm);

	stats->q_opackets[queue->queue_index] = pkts;
	stats->q_obytes[queue->queue_index] = bytes;
	stats->opackets += pkts;
	stats->obytes += bytes;
	stats->oerrors += queue->tx_errors;
}

void
eth_tx_queue_stats_reset(void *vqueue)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;

	queue = vqueue;
	ddm = queue->ddm;

	ark_ddm_queue_reset_stats(ddm);
	queue->tx_errors = 0;
}