#include <rte_mempool.h>
#include "otx2_ethdev.h"
+#include "otx2_ethdev_sec.h"
static inline uint64_t
nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
{
uint64_t capa = NIX_RX_OFFLOAD_CAPA;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf(dev) ||
+ dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
return capa;
static inline uint64_t
nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
{
- RTE_SET_USED(dev);
-
- return NIX_TX_OFFLOAD_CAPA;
+ uint64_t capa = NIX_TX_OFFLOAD_CAPA;
+
+ /* TSO not supported for earlier chip revisions */
+ if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
+ capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ return capa;
}
static const struct otx2_dev_ops otx2_dev_ops = {
req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
}
- req->rx_cfg |= BIT_ULL(32 /* DROP_RE */);
+ req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */) |
+ BIT_ULL(33 /* Outer L2 Length */) |
+ BIT_ULL(38 /* Inner L4 UDP Length */) |
+ BIT_ULL(39 /* Inner L3 Length */) |
+ BIT_ULL(40 /* Outer L4 UDP Length */) |
+ BIT_ULL(41 /* Outer L3 Length */));
+
+ if (dev->rss_tag_as_xor == 0)
+ req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (rc)
dev->cints = rsp->cints;
dev->qints = rsp->qints;
dev->npc_flow.channel = dev->rx_chan_base;
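+ /* Remember whether HW Rx timestamping is already enabled on this LF */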
+ dev->ptp_en = rsp->hw_rx_tstamp_en;
return 0;
}
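+/* Notify AF of the parser pkind to use for the configured custom switch
+ * header type on both Rx and Tx; the default pkind is restored on disable.
+ */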
+static int
+nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct npc_set_pkind *req;
+ struct msg_rsp *rsp;
+ int rc;
+
+ if (dev->npc_flow.switch_header_type == 0)
+ return 0;
+
+ if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_LEN_90B &&
+ !otx2_dev_is_sdp(dev)) {
+ otx2_err("chlen90b is not supported on non-SDP device");
+ return -EINVAL;
+ }
+
+ /* Notify AF about higig2 config */
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ req->dir = PKIND_RX;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ req->dir = PKIND_TX;
+ return otx2_mbox_process_msg(mbox, (void *)&rsp);
+}
+
static int
nix_lf_free(struct otx2_eth_dev *dev)
{
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
- return 0;
+ if (en && otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
if (en)
otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
NIX_CQ_ALIGN, dev->node);
if (rz == NULL) {
otx2_err("Failed to allocate mem for cq hw ring");
- rc = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
memset(rz->addr, 0, rz->len);
rxq->desc = (uintptr_t)rz->addr;
rc = otx2_mbox_process(mbox);
if (rc) {
otx2_err("Failed to init cq context");
- goto fail;
+ return rc;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->op = NIX_AQ_INSTOP_INIT;
aq->rq.sso_ena = 0;
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+ aq->rq.ipsech_ena = 1;
+
aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
aq->rq.spb_ena = 0;
aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
aq->rq.first_skip = first_skip;
aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
aq->rq.flow_tagw = 32; /* 32-bits */
- aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
- aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
- aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
- aq->rq.lpb_sizem1 /= 8;
+ aq->rq.lpb_sizem1 = mp->elt_size / 8;
aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
aq->rq.ena = 1;
aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
rc = otx2_mbox_process(mbox);
if (rc) {
otx2_err("Failed to init rq context");
- goto fail;
+ return rc;
+ }
+
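+ /* Pin the CQ and RQ contexts in NDC so they are not evicted from the
+ * hardware context cache
+ */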
+ if (dev->lock_rx_ctx) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_LOCK;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK cq context");
+ return rc;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ otx2_err("Failed to LOCK rq context");
+ return -ENOMEM;
+ }
+ }
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_LOCK;
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK rq context");
+ return rc;
+ }
}
return 0;
-fail:
- return rc;
}
static int
return rc;
}
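+ /* Drop the NDC lock taken on the CQ and RQ contexts at setup time */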
+ if (dev->lock_rx_ctx) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = rxq->rq;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to UNLOCK cq context");
+ return rc;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ otx2_err("Failed to UNLOCK rq context");
+ return -ENOMEM;
+ }
+ }
+ aq->qidx = rxq->rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_UNLOCK;
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to UNLOCK rq context");
+ return rc;
+ }
+ }
+
return 0;
}
if (eth_dev->data->rx_queues[rq] != NULL) {
otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+ rte_eth_dma_zone_free(eth_dev, "cq", rq);
eth_dev->data->rx_queues[rq] = NULL;
}
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
- if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ flags |= NIX_RX_OFFLOAD_SECURITY_F;
+
+ if (!dev->ptype_disable)
+ flags |= NIX_RX_OFFLOAD_PTYPE_F;
+
return flags;
}
if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
+ /* Enable Inner checksum for TSO */
+ if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ /* Enable Inner and Outer checksum for Tunnel TSO */
+ if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ if (conf & DEV_TX_OFFLOAD_SECURITY)
+ flags |= NIX_TX_OFFLOAD_SECURITY_F;
+
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
return flags;
}
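+/* Pin the SQB aura and pool contexts in NDC for the Tx path */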
+static int
+nix_sqb_lock(struct rte_mempool *mp)
+{
+ struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+ struct npa_aq_enq_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_LOCK;
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(npa_lf->mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK AURA context");
+ return rc;
+ }
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ otx2_err("Failed to LOCK POOL context");
+ return -ENOMEM;
+ }
+ }
+
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_POOL;
+ req->op = NPA_AQ_INSTOP_LOCK;
+
+ rc = otx2_mbox_process(npa_lf->mbox);
+ if (rc < 0) {
+ otx2_err("Unable to lock POOL in NDC");
+ return rc;
+ }
+
+ return 0;
+}
+
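+/* Release the NDC lock on the SQB aura and pool contexts */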
+static int
+nix_sqb_unlock(struct rte_mempool *mp)
+{
+ struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+ struct npa_aq_enq_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(npa_lf->mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to UNLOCK AURA context");
+ return rc;
+ }
+
+ req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ if (!req) {
+ otx2_err("Failed to UNLOCK POOL context");
+ return -ENOMEM;
+ }
+ }
+ req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ req->ctype = NPA_AQ_CTYPE_POOL;
+ req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(npa_lf->mbox);
+ if (rc < 0) {
+ otx2_err("Unable to UNLOCK AURA in NDC");
+ return rc;
+ }
+
+ return 0;
+}
+
static int
nix_sq_init(struct otx2_eth_txq *txq)
{
/* Many to one reduction */
sq->sq.qint_idx = txq->sq % dev->qints;
- return otx2_mbox_process(mbox);
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0)
+ return rc;
+
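+ /* Also pin the SQ context in NDC when Tx context locking is enabled */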
+ if (dev->lock_tx_ctx) {
+ sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ sq->qidx = txq->sq;
+ sq->ctype = NIX_AQ_CTYPE_SQ;
+ sq->op = NIX_AQ_INSTOP_LOCK;
+
+ rc = otx2_mbox_process(mbox);
+ }
+
+ return rc;
}
static int
if (rc)
return rc;
+ if (dev->lock_tx_ctx) {
+ /* Unlock sq */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = txq->sq;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0)
+ return rc;
+
+ nix_sqb_unlock(txq->sqb_pool);
+ }
+
/* Read SQ and free sqb's */
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = txq->sq;
}
nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
+ if (dev->lock_tx_ctx)
+ nix_sqb_lock(txq->sqb_pool);
return 0;
fail:
otx2_nix_dbg("Releasing txq %u", txq->sq);
/* Flush and disable tm */
- otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
+ otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started);
/* Free sqb's and disable sq */
nix_sq_uninit(txq);
rte_mempool_free(txq->sqb_pool);
txq->sqb_pool = NULL;
}
+ otx2_nix_sq_flush_post(txq);
rte_free(txq);
}
txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
for (i = 0; i < nb_txq; i++) {
if (txq[i] == NULL) {
- otx2_err("txq[%d] is already released", i);
- goto fail;
+ tx_qconf[i].valid = false;
+ otx2_info("txq[%d] is already released", i);
+ continue;
}
memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
+ tx_qconf[i].valid = true;
otx2_nix_tx_queue_release(txq[i]);
eth_dev->data->tx_queues[i] = NULL;
}
rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
for (i = 0; i < nb_rxq; i++) {
if (rxq[i] == NULL) {
- otx2_err("rxq[%d] is already released", i);
- goto fail;
+ rx_qconf[i].valid = false;
+ otx2_info("rxq[%d] is already released", i);
+ continue;
}
memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
+ rx_qconf[i].valid = true;
otx2_nix_rx_queue_release(rxq[i]);
eth_dev->data->rx_queues[i] = NULL;
}
* queues are already setup in port_configure().
*/
for (i = 0; i < nb_txq; i++) {
+ if (!tx_qconf[i].valid)
+ continue;
rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
tx_qconf[i].socket_id,
&tx_qconf[i].conf.tx);
free(tx_qconf); tx_qconf = NULL;
for (i = 0; i < nb_rxq; i++) {
+ if (!rx_qconf[i].valid)
+ continue;
rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
rx_qconf[i].socket_id,
&rx_qconf[i].conf.rx,
rte_mb();
}
+static void
+nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
+{
+ volatile struct nix_lso_format *field;
+
+ /* This format works only for TCP packets marked with OL3/OL4 */
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
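+/* LSO format for TCP inside a UDP-encapsulated tunnel (VXLAN/GENEVE) */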
+static void
+nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Outer UDP length */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
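+/* LSO format for TCP inside a non-UDP tunnel such as GRE */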
+static void
+nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
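+/* Program the LSO formats needed for TSO and tunnel TSO and verify that
+ * the AF returned the expected contiguous format indices.
+ */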
+static int
+nix_setup_lso_formats(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *req;
+ uint8_t base;
+ int rc;
+
+ /* Skip if TSO was not requested */
+ if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
+ return 0;
+ /*
+ * IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ base = rsp->lso_format_idx;
+ if (base != NIX_LSO_FORMAT_IDX_TSOV4)
+ return -EFAULT;
+ dev->lso_base_idx = base;
+ otx2_nix_dbg("tcpv4 lso fmt=%u", base);
+
+ /*
+ * IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 1)
+ return -EFAULT;
+ otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 2)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 3)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 4)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ if (rsp->lso_format_idx != base + 5)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
+
+ /*
+ * IPv4/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 6)
+ return -EFAULT;
+ otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
+
+ /*
+ * IPv4/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, true, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 7)
+ return -EFAULT;
+ otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
+
+ /*
+ * IPv6/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, false, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 8)
+ return -EFAULT;
+ otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
+
+ /*
+ * IPv6/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, false, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ if (rsp->lso_format_idx != base + 9)
+ return -EFAULT;
+ otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
+ return 0;
+}
+
static int
otx2_nix_configure(struct rte_eth_dev *eth_dev)
{
goto fail_configure;
}
- if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
- otx2_err("Setting link speed/duplex not supported");
- goto fail_configure;
- }
-
if (conf->dcb_capability_en == 1) {
otx2_err("dcb enable is not supported");
goto fail_configure;
/* Free the resources allocated from the previous configure */
if (dev->configured == 1) {
+ otx2_eth_sec_fini(eth_dev);
otx2_nix_rxchan_bpid_cfg(eth_dev, false);
otx2_nix_vlan_fini(eth_dev);
+ otx2_nix_mc_addr_list_uninstall(eth_dev);
otx2_flow_free_all_resources(dev);
oxt2_nix_unregister_queue_irqs(eth_dev);
if (eth_dev->data->dev_conf.intr_conf.rxq)
goto fail_offloads;
}
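+ /* Enable NIX LF error and RAS interrupts */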
+ otx2_nix_err_intr_enb_dis(eth_dev, true);
+ otx2_nix_ras_intr_enb_dis(eth_dev, true);
+
+ if (dev->ptp_en &&
+ dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ otx2_err("Both PTP and switch header enabled");
+ goto free_nix_lf;
+ }
+
+ rc = nix_lf_switch_header_type_enable(dev, true);
+ if (rc) {
+ otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
+ goto free_nix_lf;
+ }
+
+ rc = nix_setup_lso_formats(dev);
+ if (rc) {
+ otx2_err("failed to setup nix lso format fields, rc=%d", rc);
+ goto free_nix_lf;
+ }
+
/* Configure RSS */
rc = otx2_nix_rss_config(eth_dev);
if (rc) {
rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
if (rc) {
otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
- goto q_irq_fini;
+ goto cq_fini;
}
rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
if (rc) {
otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
- goto q_irq_fini;
+ goto cq_fini;
+ }
+
+ /* Enable security */
+ rc = otx2_eth_sec_init(eth_dev);
+ if (rc)
+ goto cq_fini;
+
+ rc = otx2_nix_flow_ctrl_init(eth_dev);
+ if (rc) {
+ otx2_err("Failed to init flow ctrl mode %d", rc);
+ goto cq_fini;
+ }
+
+ rc = otx2_nix_mc_addr_list_install(eth_dev);
+ if (rc < 0) {
+ otx2_err("Failed to install mc address list rc=%d", rc);
+ goto sec_fini;
}
/*
if (dev->configured == 1) {
rc = nix_restore_queue_cfg(eth_dev);
if (rc)
- goto cq_fini;
+ goto uninstall_mc_list;
}
/* Update the mac address */
rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
+ /* Apply new link configurations if changed */
+ rc = otx2_apply_link_speed(eth_dev);
+ if (rc) {
+ otx2_err("Failed to set link configuration");
+ goto uninstall_mc_list;
+ }
+
otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
" rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
" rx_flags=0x%x tx_flags=0x%x",
dev->configured_nb_tx_qs = data->nb_tx_queues;
return 0;
+uninstall_mc_list:
+ otx2_nix_mc_addr_list_uninstall(eth_dev);
+sec_fini:
+ otx2_eth_sec_fini(eth_dev);
cq_fini:
oxt2_nix_unregister_cq_irqs(eth_dev);
q_irq_fini:
struct otx2_eth_rxq *rxq;
int count, i, j, rc;
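+ /* Restore the default parser pkind before stopping the port */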
+ nix_lf_switch_header_type_enable(dev, false);
nix_cgx_stop_link_event(dev);
npc_rx_disable(dev);
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
int rc, i;
- if (eth_dev->data->nb_rx_queues != 0) {
+ /* Skip the MTU recalculation here if PTP is already enabled by the PF,
+ * as otx2_nix_recalc_mtu is invoked from the otx2_nix_ptp_enable_vf
+ * call below.
+ */
+ if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
rc = otx2_nix_recalc_mtu(eth_dev);
if (rc)
return rc;
else
otx2_nix_timesync_disable(eth_dev);
+ /* Let the VF know that the mbuf data offset is shifted by 8 bytes when
+ * PTP is already enabled in the PF owning this VF
+ */
+ if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
+ otx2_nix_ptp_enable_vf(eth_dev);
+
rc = npc_rx_enable(dev);
if (rc) {
otx2_err("Failed to enable NPC rx %d", rc);
.link_update = otx2_nix_link_update,
.tx_queue_setup = otx2_nix_tx_queue_setup,
.tx_queue_release = otx2_nix_tx_queue_release,
+ .tm_ops_get = otx2_nix_tm_ops_get,
.rx_queue_setup = otx2_nix_rx_queue_setup,
.rx_queue_release = otx2_nix_rx_queue_release,
.dev_start = otx2_nix_dev_start,
.dev_set_link_up = otx2_nix_dev_set_link_up,
.dev_set_link_down = otx2_nix_dev_set_link_down,
.dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
+ .dev_ptypes_set = otx2_nix_ptypes_set,
.dev_reset = otx2_nix_dev_reset,
.stats_get = otx2_nix_dev_stats_get,
.stats_reset = otx2_nix_dev_stats_reset,
.mac_addr_add = otx2_nix_mac_addr_add,
.mac_addr_remove = otx2_nix_mac_addr_del,
.mac_addr_set = otx2_nix_mac_addr_set,
+ .set_mc_addr_list = otx2_nix_set_mc_addr_list,
.promiscuous_enable = otx2_nix_promisc_enable,
.promiscuous_disable = otx2_nix_promisc_disable,
.allmulticast_enable = otx2_nix_allmulticast_enable,
.xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
.rxq_info_get = otx2_nix_rxq_info_get,
.txq_info_get = otx2_nix_txq_info_get,
- .rx_queue_count = otx2_nix_rx_queue_count,
- .rx_descriptor_done = otx2_nix_rx_descriptor_done,
- .rx_descriptor_status = otx2_nix_rx_descriptor_status,
+ .rx_burst_mode_get = otx2_rx_burst_mode_get,
+ .tx_burst_mode_get = otx2_tx_burst_mode_get,
.tx_done_cleanup = otx2_nix_tx_done_cleanup,
+ .set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
.pool_ops_supported = otx2_nix_pool_ops_supported,
.filter_ctrl = otx2_nix_dev_filter_ctrl,
.get_module_info = otx2_nix_get_module_info,
return otx2_mbox_process(mbox);
}
+static bool
+otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
+{
+ if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
+ pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+ return true;
+ return false;
+}
+
+static inline uint64_t
+nix_get_blkaddr(struct otx2_eth_dev *dev)
+{
+ uint64_t reg;
+
+ /* Read the discovery register to find out which NIX block the LF is
+ * attached to.
+ */
+ reg = otx2_read64(dev->bar2 +
+ RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_NIX0));
+
+ return reg & 0x1FFULL ? RVU_BLOCK_ADDR_NIX0 : RVU_BLOCK_ADDR_NIX1;
+}
+
static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
int rc, max_entries;
eth_dev->dev_ops = &otx2_eth_dev_ops;
+ eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done;
+ eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
+ eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
+ eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
goto error;
}
}
+ if (otx2_eth_dev_is_sdp(pci_dev))
+ dev->sdp_link = true;
+ else
+ dev->sdp_link = false;
/* Device generic callbacks */
dev->ops = &otx2_dev_ops;
dev->eth_dev = eth_dev;
dev->configured = 0;
dev->drv_inited = true;
- dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
+ dev->ptype_disable = 0;
dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
/* Attach NIX LF */
if (rc)
goto otx2_npa_uninit;
+ dev->base = dev->bar2 + (nix_get_blkaddr(dev) << 20);
+
/* Get NIX MSIX offset */
rc = nix_lf_get_msix_offset(dev);
if (rc)
dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
}
+ /* Create security ctx */
+ rc = otx2_eth_sec_ctx_create(eth_dev);
+ if (rc)
+ goto free_mac_addrs;
+ dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+ dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+
/* Initialize rte-flow */
rc = otx2_flow_init(dev);
if (rc)
- goto free_mac_addrs;
+ goto sec_ctx_destroy;
+
+ otx2_nix_mc_filter_init(dev);
otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
" rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
dev->rx_offload_capa, dev->tx_offload_capa);
return 0;
+sec_ctx_destroy:
+ otx2_eth_sec_ctx_destroy(eth_dev);
free_mac_addrs:
rte_free(eth_dev->data->mac_addrs);
unregister_irq:
/* Disable other rte_flow entries */
otx2_flow_fini(dev);
+ /* Free multicast filter list */
+ otx2_nix_mc_filter_fini(dev);
+
/* Disable PTP if already enabled */
if (otx2_ethdev_is_ptp_en(dev))
otx2_nix_timesync_disable(eth_dev);
if (rc)
otx2_err("Failed to cleanup npa lf, rc=%d", rc);
+ /* Disable security */
+ otx2_eth_sec_fini(eth_dev);
+
+ /* Destroy security ctx */
+ otx2_eth_sec_ctx_destroy(eth_dev);
+
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
dev->drv_inited = false;
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_AF_VF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+ },
{
.vendor_id = 0,
},