PMD_REGISTER_DRIVER(rte_i40e_driver);
+/*
+ * Initialize registers for flexible payload, which normally should be set
+ * by the NVM. This workaround should be removed once the NVM issue is fixed.
+ */
+#ifndef I40E_GLQF_ORT
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_PIT
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
+#endif
+
+static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
+
+ /* GLQF_PIT Registers */
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+}
+
static int
eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
struct rte_eth_dev *dev)
return ret;
}
+ /*
+ * To work around the NVM issue, initialize the registers
+ * for flexible payload in software.
+ * This should be removed once the issue is fixed in the NVM.
+ */
+ i40e_flex_payload_reg_init(hw);
+
/* Initialize the parameters for adminq */
i40e_init_adminq_parameter(hw);
ret = i40e_init_adminq(hw);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
+ i40e_fdir_teardown(pf);
err_get_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
}
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to configure fdir.");
+ goto err_up;
+ }
+
+ /* enable FDIR MSIX interrupt */
+ if (pf->flags & I40E_FLAG_FDIR) {
+ i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ }
+
/* Enable all queues which have been configured */
ret = i40e_dev_switch_queues(pf, TRUE);
if (ret != I40E_SUCCESS) {
i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
}
+ /* disable FDIR MSIX interrupt on the stop path */
+ if (pf->flags & I40E_FLAG_FDIR) {
+ i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
+ }
/* Clear all queues and release memory */
i40e_dev_clear_queues(dev);
/* Set link down */
i40e_dev_set_link_down(dev);
+
}
static void
i40e_shutdown_lan_hmc(hw);
/* release all the existing VSIs and VEBs */
+ i40e_fdir_teardown(pf);
i40e_vsi_release(pf->main_vsi);
/* shutdown the adminq */
case I40E_VSI_VMDQ2:
vsi->nb_qps = pf->vmdq_nb_qps;
break;
+ case I40E_VSI_FDIR:
+ vsi->nb_qps = pf->fdir_nb_qps;
+ break;
default:
goto fail_mem;
}
- ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
- vsi->seid, ret);
- goto fail_mem;
- }
- vsi->base_queue = ret;
+ /*
+ * The filter status descriptor is reported on rx queue 0,
+ * while the tx queue used for fdir filter programming has no
+ * such constraint and can be any queue.
+ * To keep it simple, the FDIR VSI always uses queue pair 0.
+ * To guarantee that, the queue pair must already have been
+ * allocated before this function is called.
+ */
+ if (type != I40E_VSI_FDIR) {
+ ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+ vsi->seid, ret);
+ goto fail_mem;
+ }
+ vsi->base_queue = ret;
+ } else
+ vsi->base_queue = I40E_FDIR_QUEUE_ID;
/* VF has MSIX interrupt in VF range, don't allocate here */
if (type != I40E_VSI_SRIOV) {
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else if (type == I40E_VSI_FDIR) {
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping.");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
} else {
PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
goto fail_msix_alloc;
PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
return ret;
}
-
- /* VSI setup */
+ if (pf->flags & I40E_FLAG_FDIR) {
+ /* Allocate the queue pair first so that FDIR gets queue pair 0 */
+ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+ if (ret != I40E_FDIR_QUEUE_ID) {
+ PMD_DRV_LOG(ERR, "Queue allocation failed for FDIR:"
+ " ret=%d", ret);
+ pf->flags &= ~I40E_FLAG_FDIR;
+ }
+ }
+ /* main VSI setup */
vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
if (!vsi) {
PMD_DRV_LOG(ERR, "Setup of main vsi failed");
}
pf->main_vsi = vsi;
+ /* Set up FDIR after the main VSI is created */
+ if (pf->flags & I40E_FLAG_FDIR) {
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+ pf->flags &= ~I40E_FLAG_FDIR;
+ }
+ }
+
/* Configure filter control */
memset(&settings, 0, sizeof(settings));
if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
case RTE_ETH_FILTER_TUNNEL:
ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
return ret;
}
+
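+/*
+ * Map an rte_eth flow type to the hardware packet classification type
+ * (PCTYPE). Flow types without a table entry translate to 0.
+ */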
+enum i40e_filter_pctype
+i40e_flowtype_to_pctype(enum rte_eth_flow_type flow_type)
+{
+ static const enum i40e_filter_pctype pctype_table[] = {
+ [RTE_ETH_FLOW_TYPE_UDPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ [RTE_ETH_FLOW_TYPE_TCPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ [RTE_ETH_FLOW_TYPE_SCTPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ [RTE_ETH_FLOW_TYPE_IPV4_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ [RTE_ETH_FLOW_TYPE_FRAG_IPV4] =
+ I40E_FILTER_PCTYPE_FRAG_IPV4,
+ [RTE_ETH_FLOW_TYPE_UDPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ [RTE_ETH_FLOW_TYPE_TCPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ [RTE_ETH_FLOW_TYPE_SCTPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ [RTE_ETH_FLOW_TYPE_IPV6_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ [RTE_ETH_FLOW_TYPE_FRAG_IPV6] =
+ I40E_FILTER_PCTYPE_FRAG_IPV6,
+ };
+
+ return pctype_table[flow_type];
+}
+
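+/*
+ * Map a hardware packet classification type (PCTYPE) back to the rte_eth
+ * flow type. PCTYPEs without a table entry translate to 0.
+ */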
+enum rte_eth_flow_type
+i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
+{
+ static const enum rte_eth_flow_type flowtype_table[] = {
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_TYPE_UDPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_TYPE_TCPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ RTE_ETH_FLOW_TYPE_IPV4_OTHER,
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ RTE_ETH_FLOW_TYPE_FRAG_IPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_TYPE_UDPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_TYPE_TCPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ RTE_ETH_FLOW_TYPE_IPV6_OTHER,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ RTE_ETH_FLOW_TYPE_FRAG_IPV6,
+ };
+
+ return flowtype_table[pctype];
+}