Lock-free Tx queue = Y
Queue start/stop = P
Jumbo frame = Y
+Scattered Rx = Y
Promiscuous mode = Y
Unicast MAC filter = Y
CRC offload = Y
- Promiscuous mode
- Port hardware statistics
- Jumbo frames
+- Scatter-Gather IO support
- Link state information
- SR-IOV VF
- Multiple queues for TX
dq = &txq->dq;
if (__octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va, dq->fc_status_va,
- m) < 0)
+ m, OCCTX_TX_OFFLOAD_NONE) < 0)
return 0;
return 1;
LF_UDP_VXLAN = OCCTX_PKI_LTYPE_UDP_VXLAN,
LF_NVGRE = OCCTX_PKI_LTYPE_NVGRE,
};
+
+/* Word 0 of HW segment buflink structure */
+typedef union octtx_pki_buflink_w0_u {
+ uint64_t v;
+ struct {
+ uint64_t size:16;
+ uint64_t rsvd1:15;
+ uint64_t invfree:1;
+ /** Aura number of the next segment */
+ uint64_t aura:16;
+ uint64_t sw:9;
+ uint64_t later_invfree:1;
+ uint64_t rsvd2:5;
+ /** 1 if aura number is set */
+ uint64_t has_aura:1;
+ } s;
+} octtx_pki_buflink_w0_t;
+
+/* Word 1 of HW segment buflink structure */
+typedef union octtx_pki_buflink_w1_u {
+ uint64_t v;
+ struct {
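+ /** Address of the next segment */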
+ uint64_t addr;
+ } s;
+} octtx_pki_buflink_w1_t;
+
+/* HW structure linking packet segments into a singly linked list */
+typedef struct octtx_pki_buflink_s {
+ octtx_pki_buflink_w0_t w0; /* Word 0 of the buflink */
+ octtx_pki_buflink_w1_t w1; /* Word 1 of the buflink */
+} octtx_pki_buflink_t;
+
#endif /* __OCTEONTX_PKI_VAR_H__ */
/* pko_send_hdr_s + pko_send_link */
#define PKO_CMD_SZ (2 << 1)
-#define PKO_SEND_GATHER_SUBDC (0x0ull << 60)
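+/* The BUFLINK subdescriptor is used for single segment packets;
+ * GATHER subdescriptors are used for multi segment packets, one per
+ * segment.
+ */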
+#define PKO_SEND_BUFLINK_SUBDC (0x0ull << 60)
+#define PKO_SEND_BUFLINK_LDTYPE(x) ((x) << 58)
+#define PKO_SEND_BUFLINK_GAUAR(x) ((x) << 24)
+#define PKO_SEND_GATHER_SUBDC (0x2ull << 60)
#define PKO_SEND_GATHER_LDTYPE(x) ((x) << 58)
#define PKO_SEND_GATHER_GAUAR(x) ((x) << 24)
#include "octeontx_rxtx.h"
#include "octeontx_logs.h"
+struct evdev_priv_data {
+ OFFLOAD_FLAGS; /* Field sequence must not be changed */
+} __rte_cache_aligned;
+
struct octeontx_vdev_init_params {
uint8_t nr_port;
};
info->max_num_events;
}
+static uint16_t
+octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ uint16_t flags = 0;
+
+ /* Function created to support future offloads */
+ if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ flags |= OCCTX_TX_MULTI_SEG_F;
+
+ return flags;
+}
+
+static uint16_t
+octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ uint16_t flags = 0;
+
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ flags |= OCCTX_RX_OFFLOAD_RSS_F;
+
+ if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ flags |= OCCTX_RX_MULTI_SEG_F;
+ eth_dev->data->scattered_rx = 1;
+ /* If Rx scatter mode is enabled, Tx must also run in
+ * multi-segment mode, else mbufs will leak
+ */
+ nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+
+ return flags;
+}
+
static int
octeontx_dev_configure(struct rte_eth_dev *dev)
{
nic->pki.hash_enable = true;
nic->pki.initialized = false;
+ nic->rx_offloads |= rxmode->offloads;
+ nic->tx_offloads |= txmode->offloads;
+ nic->rx_offload_flags |= octeontx_rx_offload_flags(dev);
+ nic->tx_offload_flags |= octeontx_tx_offload_flags(dev);
+
return 0;
}
dev->rx_pkt_burst = NULL;
}
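+/* Enable Rx scatter when the configured max frame length exceeds the
+ * Rx buffer size, and publish the resulting offload flags to the
+ * event device private data.
+ */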
+static int
+octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
+{
+ struct rte_eth_dev *eth_dev = rxq->eth_dev;
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct evdev_priv_data *evdev_priv;
+ struct rte_eventdev *dev;
+ uint32_t buffsz;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ /* Setup scatter mode if needed by jumbo */
+ if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
+ nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
+ }
+
+ /* Sharing offload flags via eventdev priv region */
+ dev = &rte_eventdevs[rxq->evdev];
+ evdev_priv = dev->data->dev_private;
+ evdev_priv->rx_offload_flags = nic->rx_offload_flags;
+ evdev_priv->tx_offload_flags = nic->tx_offload_flags;
+
+ return 0;
+}
+
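+/* Select the single or multi segment Tx burst function based on the
+ * negotiated Tx offloads.
+ */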
+static void
+octeontx_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ const eth_tx_burst_t tx_burst_func[2] = {
+ [0] = octeontx_xmit_pkts,
+ [1] = octeontx_xmit_pkts_mseg,
+ };
+
+ dev->tx_pkt_burst =
+ tx_burst_func[!!(nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)];
+}
+
static int
octeontx_dev_start(struct rte_eth_dev *dev)
{
/*
* Tx start
*/
- dev->tx_pkt_burst = octeontx_xmit_pkts;
+ octeontx_set_tx_function(dev);
ret = octeontx_pko_channel_start(nic->base_ochan);
if (ret < 0) {
octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
struct rte_ether_addr *addr)
{
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
- uint8_t prom_mode = dev->data->promiscuous;
int ret;
- dev->data->promiscuous = 0;
ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
if (ret == 0) {
/* Update same mac address to BGX CAM table */
0);
}
if (ret < 0) {
- dev->data->promiscuous = prom_mode;
octeontx_log_err("failed to set MAC address on port %d",
nic->port_id);
}
rxq->evdev = nic->evdev;
rxq->ev_queues = ev_queues;
rxq->ev_ports = ev_ports;
+ rxq->pool = mb_pool;
+ octeontx_recheck_rx_offloads(rxq);
dev->data->rx_queues[qidx] = rxq;
dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
#define OCTEONTX_MAX_BGX_PORTS 4
#define OCTEONTX_MAX_LMAC_PER_BGX 4
-#define OCTEONTX_RX_OFFLOADS DEV_RX_OFFLOAD_CHECKSUM
-#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE
+#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME)
+
+#define OCTEONTX_TX_OFFLOADS (DEV_TX_OFFLOAD_MT_LOCKFREE | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
static inline struct octeontx_nic *
octeontx_pmd_priv(struct rte_eth_dev *dev)
uint16_t ev_queues;
uint16_t ev_ports;
+ uint64_t rx_offloads;
+ uint16_t rx_offload_flags;
+ uint64_t tx_offloads;
+ uint16_t tx_offload_flags;
} __rte_cache_aligned;
struct octeontx_txq {
struct rte_eth_dev *eth_dev;
uint16_t ev_queues;
uint16_t ev_ports;
+ struct rte_mempool *pool;
} __rte_cache_aligned;
#endif /* __OCTEONTX_ETHDEV_H__ */
rte_cio_wmb();
while (count < nb_pkts) {
res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
- dq->fc_status_va,
- tx_pkts[count]);
+ dq->fc_status_va, tx_pkts[count],
+ OCCTX_TX_OFFLOAD_NONE);
+ if (res < 0)
+ break;
+
+ count++;
+ }
+
+ return count; /* return number of pkts transmitted */
+}
+
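+/* Multi segment Tx burst: __octeontx_xmit_pkts() emits one gather
+ * subdescriptor per mbuf segment.
+ */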
+uint16_t __hot
+octeontx_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int count;
+ struct octeontx_txq *txq = tx_queue;
+ octeontx_dq_t *dq = &txq->dq;
+ int res;
+
+ count = 0;
+
+ rte_cio_wmb();
+ while (count < nb_pkts) {
+ res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
+ dq->fc_status_va, tx_pkts[count],
+ OCCTX_TX_OFFLOAD_NONE |
+ OCCTX_TX_MULTI_SEG_F);
if (res < 0)
break;
#include <rte_ethdev_driver.h>
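+/* Offload flag fields shared between the ethdev and the event device
+ * private data; keep the field order in sync on both sides.
+ */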
+#define OFFLOAD_FLAGS \
+ uint16_t rx_offload_flags; \
+ uint16_t tx_offload_flags
+
+#define BIT(nr) (1UL << (nr))
+
+#define OCCTX_RX_OFFLOAD_NONE (0)
+#define OCCTX_RX_OFFLOAD_RSS_F BIT(0)
+#define OCCTX_RX_MULTI_SEG_F BIT(15)
+
+#define OCCTX_TX_OFFLOAD_NONE (0)
+
+#define OCCTX_TX_MULTI_SEG_F BIT(15)
/* Packet type table */
#define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST
static __rte_always_inline int
__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
- struct rte_mbuf *tx_pkt)
+ struct rte_mbuf *tx_pkt, const uint16_t flag)
{
- uint64_t cmd_buf[4] __rte_cache_aligned;
- uint16_t gaura_id;
+ uint8_t sz = (4 + (!!(flag & OCCTX_TX_MULTI_SEG_F) * 10));
+ /* Max size of PKO SEND desc is 112 bytes */
+ uint64_t cmd_buf[sz] __rte_cache_aligned;
+ uint8_t nb_segs, nb_desc = 0;
+ uint16_t gaura_id, len = 0;
+ struct rte_mbuf *m_next = NULL;
if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
return -ENOSPC;
- /* Get the gaura Id */
- gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
-
- /* Setup PKO_SEND_HDR_S */
- cmd_buf[0] = tx_pkt->data_len & 0xffff;
- cmd_buf[1] = 0x0;
- /* Set don't free bit if reference count > 1 */
- if (rte_mbuf_refcnt_read(tx_pkt) > 1)
- cmd_buf[0] |= (1ULL << 58); /* SET DF */
-
- /* Setup PKO_SEND_GATHER_S */
- cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
- cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
- PKO_SEND_GATHER_LDTYPE(0x1ull) |
- PKO_SEND_GATHER_GAUAR((long)gaura_id) |
- tx_pkt->data_len;
-
- octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
+ if (flag & OCCTX_TX_MULTI_SEG_F) {
+ nb_segs = tx_pkt->nb_segs;
+ /* Setup PKO_SEND_HDR_S */
+ cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
+ cmd_buf[nb_desc++] = 0x0;
+
+ do {
+ m_next = tx_pkt->next;
+ /* Fetch the gaura per segment, as mbufs may belong to
+ * different pools (e.g. after fragmentation)
+ */
+ gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
+ tx_pkt->pool->pool_id);
+
+ /* Setup PKO_SEND_GATHER_S */
+ cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC |
+ PKO_SEND_GATHER_LDTYPE(0x1ull) |
+ PKO_SEND_GATHER_GAUAR((long)
+ gaura_id) |
+ tx_pkt->data_len;
+ /* Mark mempool object as "put" since it is freed by
+ * PKO.
+ */
+ if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
+ tx_pkt->next = NULL;
+ __mempool_check_cookies(tx_pkt->pool,
+ (void **)&tx_pkt, 1, 0);
+ }
+ nb_desc++;
+
+ cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
+
+ nb_segs--;
+ len += tx_pkt->data_len;
+ tx_pkt = m_next;
+ } while (nb_segs);
+ } else {
+ /* Setup PKO_SEND_HDR_S */
+ cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
+ cmd_buf[nb_desc++] = 0x0;
+
+ /* Mark mempool object as "put" since it is freed by PKO */
+ if (!(cmd_buf[0] & (1ULL << 58)))
+ __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
+ 1, 0);
+ /* Get the gaura Id */
+ gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
+ tx_pkt->pool->pool_id);
+
+ /* Setup PKO_SEND_BUFLINK_S */
+ cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
+ PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
+ PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
+ tx_pkt->data_len;
+ cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
+ }
+ octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, nb_desc);
return 0;
}
uint16_t
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t
+octeontx_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);