#include <rte_mempool.h>
#include "otx2_ethdev.h"
+#include "otx2_ethdev_sec.h"
static inline uint64_t
nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
{
uint64_t capa = NIX_RX_OFFLOAD_CAPA;
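+/* Rx timestamp offload is not advertised for VFs, nor when the port is set up
+ * for HIGIG2 switch headers: PTP and switch header parsing cannot be enabled
+ * together (see the explicit check before nix_lf_switch_header_type_enable()).
+ */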
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf(dev) ||
+ dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
return capa;
}

req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
}
req->rx_cfg |= BIT_ULL(32 /* DROP_RE */);
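+/* Default behaviour (rss_tag_as_xor == 0): request AF to use the flow tag LSBs
+ * as the RSS group adder; the "tag_as_xor" devarg selects the legacy XOR scheme.
+ */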
+ if (dev->rss_tag_as_xor == 0)
+ req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (rc)
return rc;

dev->cints = rsp->cints;
dev->qints = rsp->qints;
dev->npc_flow.channel = dev->rx_chan_base;
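+/* Cache whether Rx timestamping was already enabled by the PF owning this LF;
+ * a VF uses this at dev_start to shift its mbuf data offset accordingly.
+ */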
+ dev->ptp_en = rsp->hw_rx_tstamp_en;
return 0;
}
static int
-nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev)
+nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
{
struct otx2_mbox *mbox = dev->mbox;
struct npc_set_pkind *req;
struct msg_resp *rsp;
int rc;
/* Notify AF about higig2 config */
req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
req->mode = dev->npc_flow.switch_header_type;
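+/* On disable, fall back to the default port kind instead of the configured
+ * switch header type.
+ */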
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
req->dir = PKIND_RX;
rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (rc)
return rc;
req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
req->mode = dev->npc_flow.switch_header_type;
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
req->dir = PKIND_TX;
return otx2_mbox_process_msg(mbox, (void *)&rsp);
}
static int
cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
{
struct otx2_mbox *mbox = dev->mbox;
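+/* Internal loopback is not supported on VF or SDP interfaces; report -ENOTSUP
+ * on an enable request rather than silently succeeding.
+ */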
- if (otx2_dev_is_vf_or_sdp(dev))
- return 0;
+ if (en && otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
if (en)
otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
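+/* Deliver the RSS hash in the mbuf only if the application also requested the
+ * DEV_RX_OFFLOAD_RSS_HASH offload.
+ */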
- if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
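+/* Ptype parsing can now be disabled at runtime via the dev_ptypes_set() op;
+ * only extract packet types when it has not been turned off.
+ */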
+ if (!dev->ptype_disable)
+ flags |= NIX_RX_OFFLOAD_PTYPE_F;
+
return flags;
}
goto fail_offloads;
}
- rc = nix_lf_switch_header_type_enable(dev);
+ if (dev->ptp_en &&
+ dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ otx2_err("Both PTP and switch header enabled");
+ goto free_nix_lf;
+ }
+
+ rc = nix_lf_switch_header_type_enable(dev, true);
if (rc) {
otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
goto free_nix_lf;
}

rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
if (rc) {
otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
- goto q_irq_fini;
+ goto cq_fini;
}
rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
if (rc) {
otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
- goto q_irq_fini;
+ goto cq_fini;
}
rc = otx2_nix_mc_addr_list_install(eth_dev);
struct otx2_eth_rxq *rxq;
int count, i, j, rc;
+ nix_lf_switch_header_type_enable(dev, false);
nix_cgx_stop_link_event(dev);
npc_rx_disable(dev);
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
int rc, i;
- if (eth_dev->data->nb_rx_queues != 0) {
+ /* MTU recalculate should be avoided here if PTP is enabled by PF, as
+ * otx2_nix_recalc_mtu would be invoked during otx2_nix_ptp_enable_vf
+ * call below.
+ */
+ if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
rc = otx2_nix_recalc_mtu(eth_dev);
if (rc)
return rc;
}

if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
otx2_ethdev_is_ptp_en(dev))
otx2_nix_timesync_enable(eth_dev);
else
otx2_nix_timesync_disable(eth_dev);
+ /* Update VF about data off shifted by 8 bytes if PTP already
+ * enabled in PF owning this VF
+ */
+ if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
+ otx2_nix_ptp_enable_vf(eth_dev);
+
rc = npc_rx_enable(dev);
if (rc) {
otx2_err("Failed to enable NPC rx %d", rc);
.dev_set_link_up = otx2_nix_dev_set_link_up,
.dev_set_link_down = otx2_nix_dev_set_link_down,
.dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
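+ /* Runtime enable/disable of ptype parsing (rte_eth_dev_set_ptypes()) */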
+ .dev_ptypes_set = otx2_nix_ptypes_set,
.dev_reset = otx2_nix_dev_reset,
.stats_get = otx2_nix_dev_stats_get,
.stats_reset = otx2_nix_dev_stats_reset,
.xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
.rxq_info_get = otx2_nix_rxq_info_get,
.txq_info_get = otx2_nix_txq_info_get,
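+ /* Report the selected Rx/Tx burst function modes via
+ * rte_eth_rx_burst_mode_get()/rte_eth_tx_burst_mode_get().
+ */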
+ .rx_burst_mode_get = otx2_rx_burst_mode_get,
+ .tx_burst_mode_get = otx2_tx_burst_mode_get,
.rx_queue_count = otx2_nix_rx_queue_count,
.rx_descriptor_done = otx2_nix_rx_descriptor_done,
.rx_descriptor_status = otx2_nix_rx_descriptor_status,
dev->configured = 0;
dev->drv_inited = true;
+ dev->ptype_disable = 0;
dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
}
+ /* Create security ctx */
+ rc = otx2_eth_sec_ctx_create(eth_dev);
+ if (rc)
+ goto free_mac_addrs;
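+ /* Security context is in place, advertise inline IPsec offload capability */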
+ dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+ dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+
/* Initialize rte-flow */
rc = otx2_flow_init(dev);
if (rc)
- goto free_mac_addrs;
+ goto sec_ctx_destroy;
otx2_nix_mc_filter_init(dev);
dev->rx_offload_capa, dev->tx_offload_capa);
return 0;
+sec_ctx_destroy:
+ otx2_eth_sec_ctx_destroy(eth_dev);
free_mac_addrs:
rte_free(eth_dev->data->mac_addrs);
unregister_irq:
if (rc)
otx2_err("Failed to cleanup npa lf, rc=%d", rc);
+ /* Destroy security ctx */
+ otx2_eth_sec_ctx_destroy(eth_dev);
+
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
dev->drv_inited = false;