-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2021 Atomic Rules LLC
*/
#include <unistd.h>
#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+#ifndef RTE_LIBRTE_ARK_MIN_TX_PKTLEN
+#define ARK_MIN_TX_PKTLEN 0
+#else
+#define ARK_MIN_TX_PKTLEN RTE_LIBRTE_ARK_MIN_TX_PKTLEN
+#endif
/* ************************************************************************* */
struct ark_tx_queue {
- struct ark_tx_meta *meta_q;
+ union ark_tx_meta *meta_q;
struct rte_mbuf **bufs;
/* handles for hw objects */
/* Stats HW tracks bytes and packets, need to count send errors */
uint64_t tx_errors;
+ tx_user_meta_hook_fn tx_user_meta_hook;
+ void *ext_user_data;
+
uint32_t queue_size;
uint32_t queue_mask;
/* 3 indexes to the paired data rings. */
- uint32_t prod_index; /* where to put the next one */
- uint32_t free_index; /* mbuf has been freed */
+ int32_t prod_index; /* where to put the next one */
+ int32_t free_index; /* mbuf has been freed */
/* The queue Id is used to identify the HW Q */
uint16_t phys_qid;
/* The queue Index within the dpdk device structures */
uint16_t queue_index;
- uint32_t pad[1];
-
- /* second cache line - fields only used in slow path */
- MARKER cacheline1 __rte_cache_min_aligned;
- uint32_t cons_index; /* hw is done, can be freed */
+ /* next cache line - fields written by device */
+ RTE_MARKER cacheline1 __rte_cache_min_aligned;
+ volatile int32_t cons_index; /* hw is done, can be freed */
} __rte_cache_aligned;
/* Forward declarations */
-static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
- struct rte_mbuf *mbuf);
+static int eth_ark_tx_jumbo(struct ark_tx_queue *queue,
+ struct rte_mbuf *mbuf,
+ uint32_t *user_meta, uint8_t meta_cnt);
static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
static void free_completed_tx(struct ark_tx_queue *queue);
/* ************************************************************************* */
static inline void
-eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
- const struct rte_mbuf *mbuf,
- uint8_t flags)
+eth_ark_tx_desc_fill(struct ark_tx_queue *queue,
+ struct rte_mbuf *mbuf,
+ uint8_t flags,
+ uint32_t *user_meta,
+ uint8_t meta_cnt /* 0 to 5 */
+ )
{
- meta->physaddr = rte_mbuf_data_iova(mbuf);
- meta->delta_ns = 0;
+ uint32_t tx_idx;
+ union ark_tx_meta *meta;
+ uint8_t m;
+
+ /* Header */
+ tx_idx = queue->prod_index & queue->queue_mask;
+ meta = &queue->meta_q[tx_idx];
meta->data_len = rte_pktmbuf_data_len(mbuf);
meta->flags = flags;
-}
+ meta->meta_cnt = meta_cnt / 2;
+ meta->user1 = meta_cnt ? (*user_meta++) : 0;
+ queue->prod_index++;
+
+ queue->bufs[tx_idx] = mbuf;
+
+ /* 1 or 2 user meta data entries, user words 1,2 and 3,4 */
+ for (m = 1; m < meta_cnt; m += 2) {
+ tx_idx = queue->prod_index & queue->queue_mask;
+ meta = &queue->meta_q[tx_idx];
+ meta->usermeta0 = *user_meta++;
+ meta->usermeta1 = *user_meta++;
+ queue->prod_index++;
+ }
-/* ************************************************************************* */
-uint16_t
-eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
- struct rte_mbuf **tx_pkts __rte_unused,
- uint16_t nb_pkts __rte_unused)
-{
- return 0;
+ tx_idx = queue->prod_index & queue->queue_mask;
+ meta = &queue->meta_q[tx_idx];
+ meta->physaddr = rte_mbuf_data_iova(mbuf);
+ queue->prod_index++;
}
+
/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct ark_tx_queue *queue;
struct rte_mbuf *mbuf;
- struct ark_tx_meta *meta;
+ uint32_t user_meta[5];
- uint32_t idx;
- uint32_t prod_index_limit;
int stat;
+ int32_t prod_index_limit;
uint16_t nb;
+ uint8_t user_len = 0;
+ const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;
+ tx_user_meta_hook_fn tx_user_meta_hook;
queue = (struct ark_tx_queue *)vtxq;
+ tx_user_meta_hook = queue->tx_user_meta_hook;
/* free any packets after the HW is done with them */
free_completed_tx(queue);
- prod_index_limit = queue->queue_size + queue->free_index;
+	/* Reserve 4 queue elements of headroom for MPU data */
+ prod_index_limit = queue->queue_size + queue->free_index - 4;
for (nb = 0;
- (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
+ (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0;
++nb) {
mbuf = tx_pkts[nb];
- if (ARK_TX_PAD_TO_60) {
- if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) {
- /* this packet even if it is small can be split,
- * be sure to add to the end mbuf
+ if (min_pkt_len &&
+ unlikely(rte_pktmbuf_pkt_len(mbuf) < min_pkt_len)) {
+			/* Even a small packet can span multiple segments;
+			 * append the zero padding to the final mbuf.
+			 */
+ uint16_t to_add = min_pkt_len -
+ rte_pktmbuf_pkt_len(mbuf);
+ char *appended =
+ rte_pktmbuf_append(mbuf, to_add);
+
+ if (appended == 0) {
+ /* This packet is in error,
+ * we cannot send it so just
+ * count it and delete it.
*/
- uint16_t to_add =
- 60 - rte_pktmbuf_pkt_len(mbuf);
- char *appended =
- rte_pktmbuf_append(mbuf, to_add);
-
- if (appended == 0) {
- /* This packet is in error,
- * we cannot send it so just
- * count it and delete it.
- */
- queue->tx_errors += 1;
- rte_pktmbuf_free(mbuf);
- continue;
- }
- memset(appended, 0, to_add);
+ queue->tx_errors += 1;
+ rte_pktmbuf_free(mbuf);
+ continue;
}
+ memset(appended, 0, to_add);
}
+ if (tx_user_meta_hook)
+ tx_user_meta_hook(mbuf, user_meta, &user_len,
+ queue->ext_user_data);
if (unlikely(mbuf->nb_segs != 1)) {
- stat = eth_ark_tx_jumbo(queue, mbuf);
+ stat = eth_ark_tx_jumbo(queue, mbuf,
+ user_meta, user_len);
if (unlikely(stat != 0))
break; /* Queue is full */
} else {
- idx = queue->prod_index & queue->queue_mask;
- queue->bufs[idx] = mbuf;
- meta = &queue->meta_q[idx];
- eth_ark_tx_meta_from_mbuf(meta,
- mbuf,
- ARK_DDM_SOP |
- ARK_DDM_EOP);
- queue->prod_index++;
+ eth_ark_tx_desc_fill(queue, mbuf,
+ ARK_DDM_SOP | ARK_DDM_EOP,
+ user_meta, user_len);
}
}
- if (ARK_TX_DEBUG && (nb != nb_pkts)) {
- PMD_TX_LOG(DEBUG, "TX: Failure to send:"
+ if (ARK_DEBUG_CORE && nb != nb_pkts) {
+ ARK_PMD_LOG(DEBUG, "TX: Failure to send:"
" req: %" PRIU32
" sent: %" PRIU32
" prod: %" PRIU32
}
/* ************************************************************************* */
-static uint32_t
-eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
+static int
+eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf,
+ uint32_t *user_meta, uint8_t meta_cnt)
{
struct rte_mbuf *next;
- struct ark_tx_meta *meta;
- uint32_t free_queue_space;
- uint32_t idx;
+ int32_t free_queue_space;
uint8_t flags = ARK_DDM_SOP;
free_queue_space = queue->queue_mask -
(queue->prod_index - queue->free_index);
- if (unlikely(free_queue_space < mbuf->nb_segs))
+	/* First segment can take up to 4 queue elements; each later one takes 2 */
+ if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs))))
return -1;
while (mbuf != NULL) {
next = mbuf->next;
-
- idx = queue->prod_index & queue->queue_mask;
- queue->bufs[idx] = mbuf;
- meta = &queue->meta_q[idx];
-
flags |= (next == NULL) ? ARK_DDM_EOP : 0;
- eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
- queue->prod_index++;
+
+ eth_ark_tx_desc_fill(queue, mbuf, flags, user_meta, meta_cnt);
flags &= ~ARK_DDM_SOP; /* drop SOP flags */
+ meta_cnt = 0; /* Meta only on SOP */
mbuf = next;
}
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
- struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+ struct ark_adapter *ark = dev->data->dev_private;
struct ark_tx_queue *queue;
int status;
- /* Future: divide the Q's evenly with multi-ports */
- int port = dev->data->port_id;
- int qidx = port + queue_idx;
+ int qidx = queue_idx;
if (!rte_is_power_of_2(nb_desc)) {
- PMD_DRV_LOG(ERR,
+ ARK_PMD_LOG(ERR,
"DPDK Arkville configuration queue size"
" must be power of two %u (%s)\n",
nb_desc, __func__);
return -1;
}
+ /* Each packet requires at least 2 mpu elements - double desc count */
+ nb_desc = 2 * nb_desc;
+
/* Allocate queue struct */
queue = rte_zmalloc_socket("Ark_txqueue",
sizeof(struct ark_tx_queue),
64,
socket_id);
if (queue == 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate tx "
+ ARK_PMD_LOG(ERR, "Failed to allocate tx "
"queue memory in %s\n",
__func__);
return -ENOMEM;
queue->phys_qid = qidx;
queue->queue_index = queue_idx;
dev->data->tx_queues[queue_idx] = queue;
+ queue->tx_user_meta_hook = ark->user_ext.tx_user_meta_hook;
+ queue->ext_user_data = ark->user_data[dev->data->port_id];
queue->meta_q =
rte_zmalloc_socket("Ark_txqueue meta",
- nb_desc * sizeof(struct ark_tx_meta),
+ nb_desc * sizeof(union ark_tx_meta),
64,
socket_id);
queue->bufs =
socket_id);
if (queue->meta_q == 0 || queue->bufs == 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate "
+ ARK_PMD_LOG(ERR, "Failed to allocate "
"queue memory in %s\n", __func__);
rte_free(queue->meta_q);
rte_free(queue->bufs);
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
- phys_addr_t queue_base, ring_base, cons_index_addr;
+ rte_iova_t queue_base, ring_base, cons_index_addr;
uint32_t write_interval_ns;
/* Verify HW -- MPU */
- if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
+ if (ark_mpu_verify(queue->mpu, sizeof(union ark_tx_meta)))
return -1;
queue_base = rte_malloc_virt2iova(queue);
free_completed_tx(struct ark_tx_queue *queue)
{
struct rte_mbuf *mbuf;
- struct ark_tx_meta *meta;
- uint32_t top_index;
+ union ark_tx_meta *meta;
+ int32_t top_index;
top_index = queue->cons_index; /* read once */
- while (queue->free_index != top_index) {
+ while ((top_index - queue->free_index) > 0) {
meta = &queue->meta_q[queue->free_index & queue->queue_mask];
- mbuf = queue->bufs[queue->free_index & queue->queue_mask];
-
if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+ mbuf = queue->bufs[queue->free_index &
+ queue->queue_mask];
/* ref count of the mbuf is checked in this call. */
rte_pktmbuf_free(mbuf);
}
- queue->free_index++;
+ queue->free_index += (meta->meta_cnt + 2);
}
}