static inline uint64_t
nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
{
- RTE_SET_USED(dev);
-
- return NIX_TX_OFFLOAD_CAPA;
+ uint64_t capa = NIX_TX_OFFLOAD_CAPA;
+
+ /* TSO not supported for earlier chip revisions */
+ if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
+ capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ return capa;
}
static const struct otx2_dev_ops otx2_dev_ops = {
return 0;
}
+static int
+nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct npc_set_pkind *req;
+ struct msg_resp *rsp;
+ int rc;
+
+ if (dev->npc_flow.switch_header_type == 0)
+ return 0;
+
+ /* Notify AF about HiGig2 config */
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ req->dir = PKIND_RX;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
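+
+ /* Apply the same pkind config for the Tx direction as well */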
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ req->dir = PKIND_TX;
+ return otx2_mbox_process_msg(mbox, (void *)&rsp);
+}
+
static int
nix_lf_free(struct otx2_eth_dev *dev)
{
if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
+ /* Enable Inner checksum for TSO */
+ if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ /* Enable Inner and Outer checksum for Tunnel TSO */
+ if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
while (count) {
void *next_sqb;
- next_sqb = *(void **)((uintptr_t)sqb_buf + ((sqes_per_sqb - 1) *
+ next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
+ ((sqes_per_sqb - 1) *
nix_sq_max_sqe_sz(txq)));
npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
(uint64_t)sqb_buf);
rte_mb();
}
+static void
+nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
+{
+ volatile struct nix_lso_format *field;
+
+ /* Format works only with TCP packets marked by OL3/OL4 */
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
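+ /* Each field gives a byte offset within its layer; sizem1 is width - 1 */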
+ /* Outer IPv4 total length / IPv6 payload length */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
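+ /* ADD_PAYLEN: per-segment payload length is added to this length field */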
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
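+ /* ADD_OFFSET: running segment offset is added, advancing the sequence number */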
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static void
+nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
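+ /* Outer IP, outer UDP, inner IP and inner TCP are all updated per segment */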
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Outer UDP length */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static void
+nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
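+ /* As the UDP tunnel format, but with no outer UDP length field (e.g. GRE) */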
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static int
+nix_setup_lso_formats(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *req;
+ uint8_t base;
+ int rc;
+
+ /* Skip if TSO was not requested */
+ if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
+ return 0;
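+
+ /*
+ * All formats below must be allocated at consecutive indices
+ * starting at NIX_LSO_FORMAT_IDX_TSOV4; the base + N checks
+ * return -EFAULT if the AF hands back anything else.
+ */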
+ /*
+ * IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ base = rsp->lso_format_idx;
+ if (base != NIX_LSO_FORMAT_IDX_TSOV4)
+ return -EFAULT;
+ dev->lso_base_idx = base;
+ otx2_nix_dbg("tcpv4 lso fmt=%u", base);
+
+ /*
+ * IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 1)
+ return -EFAULT;
+ otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 2)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 3)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 4)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 5)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v6 fmt=%u", base + 5);
+
+ /*
+ * IPv4/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 6)
+ return -EFAULT;
+ otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
+
+ /*
+ * IPv4/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, true, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 7)
+ return -EFAULT;
+ otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
+
+ /*
+ * IPv6/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, false, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 8)
+ return -EFAULT;
+ otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
+
+ /*
+ * IPv6/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, false, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 9)
+ return -EFAULT;
+ otx2_nix_dbg("tun v6v6 fmt=%u", base + 9);
+
+ return 0;
+}
+
static int
otx2_nix_configure(struct rte_eth_dev *eth_dev)
{
if (dev->configured == 1) {
otx2_nix_rxchan_bpid_cfg(eth_dev, false);
otx2_nix_vlan_fini(eth_dev);
+ otx2_nix_mc_addr_list_uninstall(eth_dev);
otx2_flow_free_all_resources(dev);
oxt2_nix_unregister_queue_irqs(eth_dev);
if (eth_dev->data->dev_conf.intr_conf.rxq)
goto fail_offloads;
}
+ rc = nix_lf_switch_header_type_enable(dev);
+ if (rc) {
+ otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
+ goto free_nix_lf;
+ }
+
+ rc = nix_setup_lso_formats(dev);
+ if (rc) {
+ otx2_err("failed to setup nix lso format fields, rc=%d", rc);
+ goto free_nix_lf;
+ }
+
/* Configure RSS */
rc = otx2_nix_rss_config(eth_dev);
if (rc) {
goto q_irq_fini;
}
+ rc = otx2_nix_mc_addr_list_install(eth_dev);
+ if (rc < 0) {
+ otx2_err("Failed to install mc address list rc=%d", rc);
+ goto cq_fini;
+ }
+
/*
* Restore queue config when reconfigure is invoked and the
* application has not reconfigured the queues itself.
if (dev->configured == 1) {
rc = nix_restore_queue_cfg(eth_dev);
if (rc)
- goto cq_fini;
+ goto uninstall_mc_list;
}
/* Update the mac address */
dev->configured_nb_tx_qs = data->nb_tx_queues;
return 0;
+uninstall_mc_list:
+ otx2_nix_mc_addr_list_uninstall(eth_dev);
cq_fini:
oxt2_nix_unregister_cq_irqs(eth_dev);
q_irq_fini:
.mac_addr_add = otx2_nix_mac_addr_add,
.mac_addr_remove = otx2_nix_mac_addr_del,
.mac_addr_set = otx2_nix_mac_addr_set,
+ .set_mc_addr_list = otx2_nix_set_mc_addr_list,
.promiscuous_enable = otx2_nix_promisc_enable,
.promiscuous_disable = otx2_nix_promisc_disable,
.allmulticast_enable = otx2_nix_allmulticast_enable,
.rx_queue_count = otx2_nix_rx_queue_count,
.rx_descriptor_done = otx2_nix_rx_descriptor_done,
.rx_descriptor_status = otx2_nix_rx_descriptor_status,
+ .tx_descriptor_status = otx2_nix_tx_descriptor_status,
.tx_done_cleanup = otx2_nix_tx_done_cleanup,
.pool_ops_supported = otx2_nix_pool_ops_supported,
.filter_ctrl = otx2_nix_dev_filter_ctrl,
if (rc)
goto free_mac_addrs;
+ otx2_nix_mc_filter_init(dev);
+
otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
" rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
eth_dev->data->port_id, dev->pf, dev->vf,
/* Disable other rte_flow entries */
otx2_flow_fini(dev);
+ /* Free multicast filter list */
+ otx2_nix_mc_filter_fini(dev);
+
/* Disable PTP if already enabled */
if (otx2_ethdev_is_ptp_en(dev))
otx2_nix_timesync_disable(eth_dev);