X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=e5404f7ba4ea320c4d2bd925983e16d10f2293d5;hb=ec246eeb5da1;hp=df9db044d0f9b004ff15a7b4dd1f3310e03f88a3;hpb=9aace75fc82e07f33e90666287d776ece1034cbf;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index df9db044d0..e5404f7ba4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -39,6 +39,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -60,9 +61,8 @@
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"
 #include "i40e_pf.h"
+#include "i40e_regs.h"
-/* Maximun number of MAC addresses */
-#define I40E_NUM_MACADDR_MAX 64
 #define I40E_CLEAR_PXE_WAIT_MS 200
 /* Maximun number of capability elements */
@@ -75,11 +75,6 @@
 /* Maximun number of VSI */
 #define I40E_MAX_NUM_VSIS (384UL)
-/* Default queue interrupt throttling time in microseconds */
-#define I40E_ITR_INDEX_DEFAULT 0
-#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
-#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-
 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
 /* Flow control default timer */
@@ -129,23 +124,18 @@
 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
 (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
-#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
-#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
-#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
-#define I40E_PRTTSYN_TSYNENA 0x80000000
-#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
+/* Additional timesync values. */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+#define I40E_PRTTSYN_TSYNENA 0x80000000
+#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
+#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
 #define I40E_MAX_PERCENT 100
 #define I40E_DEFAULT_DCB_APP_NUM 1
 #define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32))
-#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8))
-#define I40E_GLQF_FD_MSK_FIELD 0x0000FFFF
-#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8))
-#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8))
-#define I40E_GLQF_HASH_MSK_FIELD 0x0000FFFF
-
 #define I40E_INSET_NONE 0x00000000000000000ULL
 /* bit0 ~ bit 7 */
@@ -196,7 +186,7 @@
 #define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
 #define I40E_INSET_FLEX_PAYLOAD \
 (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W3 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
 I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
 I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
@@ -209,9 +199,9 @@
 /* Source MAC address */
 #define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
 /* VLAN tag in the outer L2 header */
-#define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000000800000ULL
+#define I40E_REG_INSET_L2_OUTER_VLAN 0x0080000000000000ULL
 /* VLAN tag in the inner L2 header */
-#define I40E_REG_INSET_L2_INNER_VLAN 0x0000000001000000ULL
+#define I40E_REG_INSET_L2_INNER_VLAN 0x0100000000000000ULL
 /* Source IPv4 address */
 #define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
 /* Destination IPv4 address */
@@ -264,7 +254,8 @@
 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL
 /* 8th word of flex payload */
 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL
-
+/* all 8 words flex payload */
+#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS 0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL #define I40E_TRANSLATE_INSET 0 @@ -275,6 +266,22 @@ #define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL #define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL +#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4)) +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16 +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \ + I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) + +/* PCI offset for querying capability */ +#define PCI_DEV_CAP_REG 0xA4 +/* PCI offset for enabling/disabling Extended Tag */ +#define PCI_DEV_CTRL_REG 0xA8 +/* Bit mask of Extended Tag capability */ +#define PCI_DEV_CAP_EXT_TAG_MASK 0x20 +/* Bit shift of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8 +/* Bit mask of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) + static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); @@ -292,7 +299,6 @@ static void i40e_dev_stats_get(struct rte_eth_dev *dev, static int i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, unsigned n); static void i40e_dev_stats_reset(struct rte_eth_dev *dev); -static void i40e_dev_xstats_reset(struct rte_eth_dev *dev); static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id, uint8_t stat_idx, @@ -302,7 +308,9 @@ static void i40e_dev_info_get(struct rte_eth_dev *dev, static int i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); -static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid); +static int i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid); static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, @@ -343,7 +351,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw, bool offset_loaded, uint64_t *offset, uint64_t *stat); -static void i40e_pf_config_irq0(struct i40e_hw *hw); +static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue); static void i40e_dev_interrupt_handler( __rte_unused struct rte_intr_handle *handle, void *param); static int i40e_res_pool_init(struct i40e_res_pool_info *pool, @@ -372,10 +380,11 @@ static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); -static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel); -static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel); +static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static void i40e_filter_input_set_init(struct i40e_pf *pf); static int i40e_ethertype_filter_set(struct i40e_pf *pf, struct rte_eth_ethertype_filter *filter, bool add); @@ -389,7 +398,7 @@ static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info); static void i40e_configure_registers(struct i40e_hw *hw); -static void i40e_hw_init(struct i40e_hw *hw); +static void i40e_hw_init(struct rte_eth_dev *dev); static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); 
static int i40e_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, @@ -405,6 +414,30 @@ static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp); static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw); +static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); + +static int i40e_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int i40e_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *timestamp); + +static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); + +static int i40e_get_reg_length(struct rte_eth_dev *dev); + +static int i40e_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +static int i40e_get_eeprom_length(struct rte_eth_dev *dev); + +static int i40e_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); + +static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); static const struct rte_pci_id pci_id_i40e_map[] = { #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, @@ -427,9 +460,10 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .stats_get = i40e_dev_stats_get, .xstats_get = i40e_dev_xstats_get, .stats_reset = i40e_dev_stats_reset, - .xstats_reset = i40e_dev_xstats_reset, + .xstats_reset = i40e_dev_stats_reset, .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, .dev_infos_get = i40e_dev_info_get, + .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, .vlan_filter_set = i40e_vlan_filter_set, .vlan_tpid_set = i40e_vlan_tpid_set, .vlan_offload_set = i40e_vlan_offload_set, @@ -440,6 +474,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .tx_queue_start = i40e_dev_tx_queue_start, .tx_queue_stop = i40e_dev_tx_queue_stop, .rx_queue_setup = i40e_dev_rx_queue_setup, + .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable, .rx_queue_release = i40e_dev_rx_queue_release, .rx_queue_count = i40e_dev_rx_queue_count, .rx_descriptor_done = i40e_dev_rx_descriptor_done, @@ -456,8 +492,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .reta_query = i40e_dev_rss_reta_query, .rss_hash_update = i40e_dev_rss_hash_update, .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, - .udp_tunnel_add = i40e_dev_udp_tunnel_add, - .udp_tunnel_del = i40e_dev_udp_tunnel_del, + .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del, .filter_ctrl = i40e_dev_filter_ctrl, .rxq_info_get = i40e_rxq_info_get, .txq_info_get = i40e_txq_info_get, @@ -468,6 +504,14 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp, .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp, .get_dcb_info = i40e_dev_get_dcb_info, + .timesync_adjust_time = i40e_timesync_adjust_time, + .timesync_read_time = i40e_timesync_read_time, + .timesync_write_time = i40e_timesync_write_time, + .get_reg_length = i40e_get_reg_length, + .get_reg = i40e_get_regs, + .get_eeprom_length = i40e_get_eeprom_length, + .get_eeprom = i40e_get_eeprom, + .mac_addr_set = i40e_set_default_mac_addr, }; /* store statistics names and its offset in stats structure */ @@ -489,6 +533,9 @@ static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = { {"tx_dropped", 
offsetof(struct i40e_eth_stats, tx_discards)}, }; +#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \ + sizeof(rte_i40e_stats_strings[0])) + static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = { {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats, tx_dropped_link_down)}, @@ -555,15 +602,30 @@ static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = { rx_lpi_count)}, }; -/* Q Stats: 5 stats are exposed for each queue, implemented in xstats_get() */ -#define I40E_NB_HW_PORT_Q_STATS (8 * 5) - -#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \ - sizeof(rte_i40e_stats_strings[0])) #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \ sizeof(rte_i40e_hw_port_strings[0])) -#define I40E_NB_XSTATS (I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS + \ - I40E_NB_HW_PORT_Q_STATS) + +static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = { + {"xon_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_rx)}, + {"xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xoff_rx)}, +}; + +#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \ + sizeof(rte_i40e_rxq_prio_strings[0])) + +static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { + {"xon_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_tx)}, + {"xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xoff_tx)}, + {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_2_xoff)}, +}; + +#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ + sizeof(rte_i40e_txq_prio_strings[0])) static struct eth_driver rte_i40e_pmd = { .pci_drv = { @@ -733,7 +795,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_clear_hw(hw); /* Initialize the hardware */ - i40e_hw_init(hw); + i40e_hw_init(dev); /* Reset here to make sure all is clean for each PF */ ret = i40e_pf_reset(hw); @@ -756,6 +818,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) */ i40e_flex_payload_reg_init(hw); + /* Initialize the input set for filters (hash and fd) to default value */ + i40e_filter_input_set_init(pf); + /* Initialize the parameters for adminq */ i40e_init_adminq_parameter(hw); ret = i40e_init_adminq(hw); @@ -840,6 +905,20 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) hw->fc.requested_mode = I40E_FC_NONE; i40e_set_fc(hw, &aq_fail, TRUE); + /* Set the global registers with default ether type value */ + ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to set the default outer " + "VLAN ether type"); + goto err_setup_pf_switch; + } + ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, ETHER_TYPE_VLAN); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to set the default outer " + "VLAN ether type"); + goto err_setup_pf_switch; + } + /* PF setup, which includes VSI setup */ ret = i40e_pf_setup(pf); if (ret) { @@ -875,7 +954,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_dev_interrupt_handler, (void *)dev); /* configure and enable device interrupt */ - i40e_pf_config_irq0(hw); + i40e_pf_config_irq0(hw, TRUE); i40e_pf_enable_irq0(hw); /* enable uio intr after callback register */ @@ -887,6 +966,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) */ i40e_add_tx_flow_control_drop_filter(pf); + /* Set the max frame size to 0x2600 by default, + * in case other drivers changed the default value. 
+ */ + i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL); + /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); @@ -1056,6 +1140,8 @@ err: void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) { + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t msix_vect = vsi->msix_intr; uint16_t i; @@ -1067,52 +1153,50 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) } if (vsi->type != I40E_VSI_SRIOV) { - I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0); - I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, - msix_vect - 1), 0); + if (!rte_intr_allow_others(intr_handle)) { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK); + I40E_WRITE_REG(hw, + I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT), + 0); + } else { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), + I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); + I40E_WRITE_REG(hw, + I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, + msix_vect - 1), 0); + } } else { uint32_t reg; reg = (hw->func_caps.num_msix_vectors_vf - 1) * vsi->user_param + (msix_vect - 1); - I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0); + I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); } I40E_WRITE_FLUSH(hw); } -static inline uint16_t -i40e_calc_itr_interval(int16_t interval) -{ - if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) - interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; - - /* Convert to hardware count, as writing each 1 represents 2 us */ - return (interval/2); -} - -void -i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) +static void +__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, + int base_queue, int nb_queue) { + int i; uint32_t val; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); - uint16_t msix_vect = vsi->msix_intr; - int i; - - for (i = 0; i < vsi->nb_qps; i++) - I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0); /* Bind all RX queues to allocated MSIX interrupt */ - for (i = 0; i < vsi->nb_qps; i++) { + for (i = 0; i < nb_queue; i++) { val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | I40E_QINT_RQCTL_ITR_INDX_MASK | - ((vsi->base_queue + i + 1) << - I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + ((base_queue + i + 1) << + I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | I40E_QINT_RQCTL_CAUSE_ENA_MASK; - if (i == vsi->nb_qps - 1) + if (i == nb_queue - 1) val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK; - I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val); + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val); } /* Write first RX queue to Link list register as the head element */ @@ -1120,56 +1204,172 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); - I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), - (vsi->base_queue << - I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | - (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); - - I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, - msix_vect - 1), interval); - -#ifndef I40E_GLINT_CTL -#define I40E_GLINT_CTL 0x0003F800 -#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4 -#endif - /* Disable auto-mask on enabling of all none-zero interrupt */ - I40E_WRITE_REG(hw, I40E_GLINT_CTL, - I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK); + if (msix_vect == I40E_MISC_VEC_ID) { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + (base_queue << + I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | + (0x0 << + 
I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); + I40E_WRITE_REG(hw, + I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT), + interval); + } else { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), + (base_queue << + I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); + I40E_WRITE_REG(hw, + I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, + msix_vect - 1), + interval); + } } else { uint32_t reg; - /* num_msix_vectors_vf needs to minus irq0 */ - reg = (hw->func_caps.num_msix_vectors_vf - 1) * - vsi->user_param + (msix_vect - 1); + if (msix_vect == I40E_MISC_VEC_ID) { + I40E_WRITE_REG(hw, + I40E_VPINT_LNKLST0(vsi->user_param), + (base_queue << + I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); + } else { + /* num_msix_vectors_vf needs to minus irq0 */ + reg = (hw->func_caps.num_msix_vectors_vf - 1) * + vsi->user_param + (msix_vect - 1); - I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue << + I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), + (base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | - (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); + (0x0 << + I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); + } } I40E_WRITE_FLUSH(hw); } +void +i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t msix_vect = vsi->msix_intr; + uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); + uint16_t queue_idx = 0; + int record = 0; + uint32_t val; + int i; + + for (i = 0; i < vsi->nb_qps; i++) { + I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0); + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0); + } + + /* INTENA flag is not auto-cleared for interrupt */ + val = I40E_READ_REG(hw, I40E_GLINT_CTL); + val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; + I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); + + /* VF bind interrupt */ + if (vsi->type == I40E_VSI_SRIOV) { + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue, vsi->nb_qps); + return; + } + + /* PF & VMDq bind interrupt */ + if (rte_intr_dp_is_en(intr_handle)) { + if (vsi->type == I40E_VSI_MAIN) { + queue_idx = 0; + record = 1; + } else if (vsi->type == I40E_VSI_VMDQ2) { + struct i40e_vsi *main_vsi = + I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter); + queue_idx = vsi->base_queue - main_vsi->nb_qps; + record = 1; + } + } + + for (i = 0; i < vsi->nb_used_qps; i++) { + if (nb_msix <= 1) { + if (!rte_intr_allow_others(intr_handle)) + /* allow to share MISC_VEC_ID */ + msix_vect = I40E_MISC_VEC_ID; + + /* no enough msix_vect, map all to one */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, + vsi->nb_used_qps - i); + for (; !!record && i < vsi->nb_used_qps; i++) + intr_handle->intr_vec[queue_idx + i] = + msix_vect; + break; + } + /* 1:1 queue/msix_vect mapping */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, 1); + if (!!record) + intr_handle->intr_vec[queue_idx + i] = msix_vect; + + msix_vect++; + nb_msix--; + } +} + static void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) { + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); uint16_t interval = i40e_calc_itr_interval(\ - RTE_LIBRTE_I40E_ITR_INTERVAL); - - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), - 
I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + RTE_LIBRTE_I40E_ITR_INTERVAL); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle)) + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + (interval << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + } + else + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) | + (interval << + I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)); + + I40E_WRITE_FLUSH(hw); } static void i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) { + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle)) + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), + 0); + } + else + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0); + I40E_WRITE_FLUSH(hw); } static inline uint8_t @@ -1199,58 +1399,15 @@ i40e_parse_link_speed(uint16_t eth_link_speed) } static int -i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed) -{ - enum i40e_status_code status; - struct i40e_aq_get_phy_abilities_resp phy_ab; - struct i40e_aq_set_phy_config phy_conf; - const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX | - I40E_AQ_PHY_FLAG_PAUSE_RX | - I40E_AQ_PHY_FLAG_LOW_POWER; - const uint8_t advt = I40E_LINK_SPEED_40GB | - I40E_LINK_SPEED_10GB | - I40E_LINK_SPEED_1GB | - I40E_LINK_SPEED_100MB; - int ret = -ENOTSUP; - - /* Skip it on 40G interfaces, as a workaround for the link issue */ - if (i40e_is_40G_device(hw->device_id)) - return I40E_SUCCESS; - - status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab, - NULL); - if (status) - return ret; - - memset(&phy_conf, 0, sizeof(phy_conf)); - - /* bits 0-2 use the values from get_phy_abilities_resp */ - abilities &= ~mask; - abilities |= phy_ab.abilities & mask; - - /* update ablities and speed */ - if (abilities & I40E_AQ_PHY_AN_ENABLED) - phy_conf.link_speed = advt; - else - phy_conf.link_speed = force_speed; - - phy_conf.abilities = abilities; - - /* use get_phy_abilities_resp value for the rest */ - phy_conf.phy_type = phy_ab.phy_type; - phy_conf.eee_capability = phy_ab.eee_capability; - phy_conf.eeer = phy_ab.eeer_val; - phy_conf.low_power_ctrl = phy_ab.d3_lpan; - - PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x", - phy_ab.abilities, phy_ab.link_speed); - PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x", - phy_conf.abilities, phy_conf.link_speed); - - status = i40e_aq_set_phy_config(hw, &phy_conf, NULL); - if (status) - return ret; - +i40e_phy_conf_link(__rte_unused struct i40e_hw *hw, + __rte_unused uint8_t abilities, + __rte_unused uint8_t force_speed) +{ + /* Skip any phy config on both 10G and 40G interfaces, as a workaround + * for the link control limitation of that all link control should be + * handled by firmware. It should follow up if link control will be + * opened to software driver in future firmware versions. 
+ */ return I40E_SUCCESS; } @@ -1279,6 +1436,8 @@ i40e_dev_start(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; int ret, i; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t intr_vector = 0; hw->adapter_stopped = 0; @@ -1290,6 +1449,28 @@ i40e_dev_start(struct rte_eth_dev *dev) return -EINVAL; } + rte_intr_disable(intr_handle); + + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), + 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + /* Initialize VSI */ ret = i40e_dev_rxtx_init(pf); if (ret != I40E_SUCCESS) { @@ -1298,11 +1479,14 @@ i40e_dev_start(struct rte_eth_dev *dev) } /* Map queues with MSIX interrupt */ + main_vsi->nb_used_qps = dev->data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; i40e_vsi_queues_bind_intr(main_vsi); i40e_vsi_enable_queues_intr(main_vsi); /* Map VMDQ VSI queues with MSIX interrupt */ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi); i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); } @@ -1339,6 +1523,22 @@ i40e_dev_start(struct rte_eth_dev *dev) goto err_up; } + if (!rte_intr_allow_others(intr_handle)) { + rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, + (void *)dev); + /* configure and enable device interrupt */ + i40e_pf_config_irq0(hw, FALSE); + i40e_pf_enable_irq0(hw); + + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex\n"); + } + + /* enable uio intr after callback register */ + rte_intr_enable(intr_handle); + return I40E_SUCCESS; err_up: @@ -1354,6 +1554,7 @@ i40e_dev_stop(struct rte_eth_dev *dev) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; struct i40e_mirror_rule *p_mirror; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; int i; /* Disable all queues */ @@ -1369,8 +1570,8 @@ i40e_dev_stop(struct rte_eth_dev *dev) } if (pf->fdir.fdir_vsi) { - i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); - i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); + i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi); + i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi); } /* Clear all queues and release memory */ i40e_dev_clear_queues(dev); @@ -1385,6 +1586,18 @@ i40e_dev_stop(struct rte_eth_dev *dev) } pf->nb_mirror_rule = 0; + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + i40e_dev_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } } static void @@ -1666,6 +1879,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) unsigned int i; struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */ + /* 
Get statistics of struct i40e_eth_stats */ i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), @@ -1683,6 +1897,12 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) I40E_GLPRT_BPRCL(hw->port), pf->offset_loaded, &os->eth.rx_broadcast, &ns->eth.rx_broadcast); + /* Workaround: CRC size should not be included in byte statistics, + * so subtract ETHER_CRC_LEN from the byte counter for each rx packet. + */ + ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + + ns->eth.rx_broadcast) * ETHER_CRC_LEN; + i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port), pf->offset_loaded, &os->eth.rx_discards, &ns->eth.rx_discards); @@ -1708,6 +1928,8 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) I40E_GLPRT_BPTCL(hw->port), pf->offset_loaded, &os->eth.tx_broadcast, &ns->eth.tx_broadcast); + ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + + ns->eth.tx_broadcast) * ETHER_CRC_LEN; /* GLPRT_TEPC not supported */ /* additional port specific stats */ @@ -1865,20 +2087,18 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->opackets = pf->main_vsi->eth_stats.tx_unicast + pf->main_vsi->eth_stats.tx_multicast + pf->main_vsi->eth_stats.tx_broadcast; - stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; - stats->obytes = pf->main_vsi->eth_stats.tx_bytes; + stats->ibytes = ns->eth.rx_bytes; + stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + pf->main_vsi->eth_stats.tx_errors; stats->imcasts = pf->main_vsi->eth_stats.rx_multicast; - stats->fdirmatch = ns->fd_sb_match; /* Rx Errors */ - stats->ibadcrc = ns->crc_errors; - stats->ibadlen = ns->rx_length_errors + ns->rx_undersize + - ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; stats->imissed = ns->eth.rx_discards + pf->main_vsi->eth_stats.rx_discards; - stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed; + stats->ierrors = ns->crc_errors + + ns->rx_length_errors + ns->rx_undersize + + ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************"); PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); @@ -1951,19 +2171,28 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************"); } +/* Reset the statistics */ static void -i40e_dev_xstats_reset(struct rte_eth_dev *dev) +i40e_dev_stats_reset(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_hw_port_stats *hw_stats = &pf->stats; - /* The hw registers are cleared on read */ + /* Mark PF and VSI stats to update the offset, aka "reset" */ pf->offset_loaded = false; + if (pf->main_vsi) + pf->main_vsi->offset_loaded = false; + + /* read the stats, reading current register values into offset */ i40e_read_stats_registers(pf, hw); +} - /* reset software counters */ - memset(hw_stats, 0, sizeof(*hw_stats)); +static uint32_t +i40e_xstats_calc_num(void) +{ + return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS + + (I40E_NB_RXQ_PRIO_XSTATS * 8) + + (I40E_NB_TXQ_PRIO_XSTATS * 8); } static int @@ -1972,18 +2201,20 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - unsigned i, count = 0; + unsigned i, count, prio; struct 
i40e_hw_port_stats *hw_stats = &pf->stats; - if (n < I40E_NB_XSTATS) - return I40E_NB_XSTATS; + count = i40e_xstats_calc_num(); + if (n < count) + return count; i40e_read_stats_registers(pf, hw); - /* Reset */ if (xstats == NULL) return 0; + count = 0; + /* Get stats from i40e_eth_stats struct */ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) { snprintf(xstats[count].name, sizeof(xstats[count].name), @@ -2002,55 +2233,35 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, count++; } - /* Get per-queue stats from i40e_hw_port struct */ - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_q%u_xon_priority_packets", i); - xstats[count].value = *(uint64_t *)(((char *)hw_stats) + - offsetof(struct i40e_hw_port_stats, - priority_xon_rx[i])); - count++; - - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_q%u_xoff_priority_packets", i); - xstats[count].value = *(uint64_t *)(((char *)hw_stats) + - offsetof(struct i40e_hw_port_stats, - priority_xoff_rx[i])); - count++; - - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_q%u_xon_priority_packets", i); - xstats[count].value = *(uint64_t *)(((char *)hw_stats) + - offsetof(struct i40e_hw_port_stats, - priority_xon_tx[i])); - count++; - - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_q%u_xoff_priority_packets", i); - xstats[count].value = *(uint64_t *)(((char *)hw_stats) + - offsetof(struct i40e_hw_port_stats, - priority_xoff_tx[i])); - count++; - - snprintf(xstats[count].name, sizeof(xstats[count].name), - "xx_q%u_xon_to_xoff_priority_packets", i); - xstats[count].value = *(uint64_t *)(((char *)hw_stats) + - offsetof(struct i40e_hw_port_stats, - priority_xon_2_xoff[i])); - count++; + for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats[count].name, + sizeof(xstats[count].name), + "rx_priority%u_%s", prio, + rte_i40e_rxq_prio_strings[i].name); + xstats[count].value = + *(uint64_t *)(((char *)hw_stats) + + rte_i40e_rxq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + count++; + } } - return I40E_NB_XSTATS; -} - -/* Reset the statistics */ -static void -i40e_dev_stats_reset(struct rte_eth_dev *dev) -{ - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats[count].name, + sizeof(xstats[count].name), + "tx_priority%u_%s", prio, + rte_i40e_txq_prio_strings[i].name); + xstats[count].value = + *(uint64_t *)(((char *)hw_stats) + + rte_i40e_txq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + count++; + } + } - /* It results in reloading the start point of each counter */ - pf->offset_loaded = false; + return count; } static int @@ -2154,11 +2365,59 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) return i40e_vsi_delete_vlan(vsi, vlan_id); } -static void -i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev, - __rte_unused uint16_t tpid) +static int +i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) { - PMD_INIT_FUNC_TRACE(); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t reg_r = 0, reg_w = 0; + uint16_t reg_id = 0; + int ret = 0; + + switch (vlan_type) { + case ETH_VLAN_TYPE_OUTER: + reg_id = 2; + break; + case ETH_VLAN_TYPE_INNER: + reg_id = 3; + break; + default: + ret = -EINVAL; + PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type); + return ret; + } + ret = 
i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + ®_r, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Fail to debug read from " + "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + ret = -EIO; + return ret; + } + PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: " + "0x%08"PRIx64"", reg_id, reg_r); + + reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK)); + reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); + if (reg_r == reg_w) { + ret = 0; + PMD_DRV_LOG(DEBUG, "No need to write"); + return ret; + } + + ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + reg_w, NULL); + if (ret != I40E_SUCCESS) { + ret = -EIO; + PMD_DRV_LOG(ERR, "Fail to debug write to " + "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + return ret; + } + PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to " + "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id); + + return ret; } static void @@ -2167,6 +2426,13 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *vsi = pf->main_vsi; + if (mask & ETH_VLAN_FILTER_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + i40e_vsi_config_vlan_filter(vsi, TRUE); + else + i40e_vsi_config_vlan_filter(vsi, FALSE); + } + if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ if (dev->data->dev_conf.rxmode.hw_vlan_strip) @@ -2418,7 +2684,10 @@ i40e_macaddr_add(struct rte_eth_dev *dev, } (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); - mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + else + mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; if (pool == 0) vsi = pf->main_vsi; @@ -2737,33 +3006,24 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, u64 size, u32 alignment) { - static uint64_t id = 0; const struct rte_memzone *mz = NULL; char z_name[RTE_MEMZONE_NAMESIZE]; if (!mem) return I40E_ERR_PARAM; - id++; - snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id); -#ifdef RTE_LIBRTE_XEN_DOM0 + snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand()); mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, alignment, RTE_PGSIZE_2M); -#else - mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY, 0, - alignment); -#endif if (!mz) return I40E_ERR_NO_MEMORY; - mem->id = id; mem->size = size; mem->va = mz->addr; -#ifdef RTE_LIBRTE_XEN_DOM0 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); -#else - mem->pa = mz->phys_addr; -#endif + mem->zone = (const void *)mz; + PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: " + "%"PRIu64, mz->name, mem->pa); return I40E_SUCCESS; } @@ -2777,9 +3037,14 @@ enum i40e_status_code i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, struct i40e_dma_mem *mem) { - if (!mem || !mem->va) + if (!mem) return I40E_ERR_PARAM; + PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: " + "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name, + mem->pa); + rte_memzone_free((const struct rte_memzone *)mem->zone); + mem->zone = NULL; mem->va = NULL; mem->pa = (u64)0; @@ -2947,17 +3212,36 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) /* VMDq queue/VSI allocation */ pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num; + pf->vmdq_nb_qps = 0; + pf->max_nb_vmdq_vsi = 0; if (hw->func_caps.vmdq) { - pf->flags |= I40E_FLAG_VMDQ; - pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; - 
pf->max_nb_vmdq_vsi = 1; - PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, " - "in total %u queues", pf->max_nb_vmdq_vsi, - pf->vmdq_nb_qps, - pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi); - } else { - pf->vmdq_nb_qps = 0; - pf->max_nb_vmdq_vsi = 0; + if (qp_count < hw->func_caps.num_tx_qp && + vsi_count < hw->func_caps.num_vsis) { + pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp - + qp_count) / pf->vmdq_nb_qp_max; + + /* Limit the maximum number of VMDq vsi to the maximum + * ethdev can support + */ + pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, + hw->func_caps.num_vsis - vsi_count); + pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, + ETH_64_POOLS); + if (pf->max_nb_vmdq_vsi) { + pf->flags |= I40E_FLAG_VMDQ; + pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; + PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues " + "per VMDQ VSI, in total %u queues", + pf->max_nb_vmdq_vsi, + pf->vmdq_nb_qps, pf->vmdq_nb_qps * + pf->max_nb_vmdq_vsi); + } else { + PMD_DRV_LOG(INFO, "No enough queues left for " + "VMDq"); + } + } else { + PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq"); + } } qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi; vsi_count += pf->max_nb_vmdq_vsi; @@ -3233,7 +3517,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool, pool->num_free -= valid_entry->len; pool->num_alloc += valid_entry->len; - return (valid_entry->base + pool->base); + return valid_entry->base + pool->base; } /** @@ -3245,7 +3529,7 @@ bitmap_is_subset(uint8_t src1, uint8_t src2) return !((src1 ^ src2) & src2); } -static int +static enum i40e_status_code validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap) { struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); @@ -3253,14 +3537,14 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap) /* If DCB is not supported, only default TC is supported */ if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) { PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported"); - return -EINVAL; + return I40E_NOT_SUPPORTED; } if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) { PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to " "HW support 0x%x", hw->func_caps.enabled_tcmap, enabled_tcmap); - return -EINVAL; + return I40E_NOT_SUPPORTED; } return I40E_SUCCESS; } @@ -3345,12 +3629,13 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap) return I40E_SUCCESS; } -static int +static enum i40e_status_code i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi, struct i40e_aqc_vsi_properties_data *info, uint8_t enabled_tcmap) { - int ret, i, total_tc = 0; + enum i40e_status_code ret; + int i, total_tc = 0; uint16_t qpnum_per_tc, bsf, qp_idx; ret = validate_tcmap_parameter(vsi, enabled_tcmap); @@ -3455,7 +3740,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) veb->uplink_seid = vsi->uplink_seid; ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid, - I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL); + I40E_DEFAULT_TCMAP, false, &veb->seid, false, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d", @@ -3579,14 +3864,21 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return i40e_vsi_add_mac(vsi, &filter); } -static int -i40e_vsi_dump_bw_config(struct i40e_vsi *vsi) +/* + * i40e_vsi_get_bw_config - Query VSI BW Information + * @vsi: the VSI to be queried + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_vsi_get_bw_config(struct i40e_vsi *vsi) { struct i40e_aqc_query_vsi_bw_config_resp bw_config; struct 
i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config; struct i40e_hw *hw = &vsi->adapter->hw; i40e_status ret; int i; + uint32_t bw_max; memset(&bw_config, 0, sizeof(bw_config)); ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); @@ -3605,20 +3897,71 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi) return ret; } - /* Not store the info yet, just print out */ - PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit); - PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw); + /* store and print out BW info */ + vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit); + vsi->bw_info.bw_max = bw_config.max_bw; + PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) | + (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i, - ets_sla_config.share_credits[i]); - PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i, - rte_le_to_cpu_16(ets_sla_config.credits[i])); - PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i, - rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >> - (i * 4)); + vsi->bw_info.bw_ets_share_credits[i] = + ets_sla_config.share_credits[i]; + vsi->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(ets_sla_config.credits[i]); + /* 4 bits per TC, 4th bit is reserved */ + vsi->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + RTE_LEN2MASK(3, uint8_t)); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i, + vsi->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i, + vsi->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i, + vsi->bw_info.bw_ets_max[i]); } - return 0; + return I40E_SUCCESS; +} + +/* i40e_enable_pf_lb + * @pf: pointer to the pf structure + * + * allow loopback on pf + */ +static inline void +i40e_enable_pf_lb(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi_context ctxt; + int ret; + + /* Use the FW API if FW >= v5.0 */ + if (hw->aq.fw_maj_ver < 5) { + PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); + return; + } + + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = hw->pf_id; + ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d", + ret, hw->aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) + PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n", + hw->aq.asq_last_status); } /* Setup a VSI */ @@ -3656,6 +3999,8 @@ i40e_vsi_setup(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "VEB setup failed"); return NULL; } + /* set ALLOWLOOPBACk on pf, when veb is created */ + i40e_enable_pf_lb(pf); } vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0); @@ -3706,15 +4051,30 @@ i40e_vsi_setup(struct i40e_pf *pf, vsi->base_queue = I40E_FDIR_QUEUE_ID; /* VF has MSIX interrupt in VF range, don't allocate here */ - if (type != I40E_VSI_SRIOV) { + if (type == I40E_VSI_MAIN) { + ret = i40e_res_pool_alloc(&pf->msix_pool, + RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID)); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d", + vsi->seid, ret); + goto 
fail_queue_alloc; + } + vsi->msix_intr = ret; + vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); + } else if (type != I40E_VSI_SRIOV) { ret = i40e_res_pool_alloc(&pf->msix_pool, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret); goto fail_queue_alloc; } vsi->msix_intr = ret; - } else + vsi->nb_msix = 1; + } else { vsi->msix_intr = 0; + vsi->nb_msix = 0; + } + /* Add VSI */ if (type == I40E_VSI_MAIN) { /* For main VSI, no need to add since it's default one */ @@ -3813,14 +4173,14 @@ i40e_vsi_setup(struct i40e_pf *pf, ctxt.connection_type = 0x1; ctxt.flags = I40E_AQ_VSI_TYPE_VF; - /** - * Do not configure switch ID to enable VEB switch by - * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB. Because in Fortville, - * if the source mac address of packet sent from VF is not - * listed in the VEB's mac table, the VEB will switch the - * packet back to the VF. Need to enable it when HW issue - * is fixed. - */ + /* Use the VEB configuration if FW >= v5.0 */ + if (hw->aq.fw_maj_ver >= 5) { + /* Configure switch ID */ + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } /* Configure port/vlan */ ctxt.info.valid_sections |= @@ -3929,7 +4289,7 @@ i40e_vsi_setup(struct i40e_pf *pf, } /* Get VSI BW information */ - i40e_vsi_dump_bw_config(vsi); + i40e_vsi_get_bw_config(vsi); return vsi; fail_msix_alloc: i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr); @@ -3940,6 +4300,63 @@ fail_mem: return NULL; } +/* Configure vlan filter on or off */ +int +i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on) +{ + int i, num; + struct i40e_mac_filter *f; + struct i40e_mac_filter_info *mac_filter; + enum rte_mac_filter_type desired_filter; + int ret = I40E_SUCCESS; + + if (on) { + /* Filter to match MAC and VLAN */ + desired_filter = RTE_MACVLAN_PERFECT_MATCH; + } else { + /* Filter to match only MAC */ + desired_filter = RTE_MAC_PERFECT_MATCH; + } + + num = vsi->mac_num; + + mac_filter = rte_zmalloc("mac_filter_info_data", + num * sizeof(*mac_filter), 0); + if (mac_filter == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + i = 0; + + /* Remove all existing mac */ + TAILQ_FOREACH(f, &vsi->mac_list, next) { + mac_filter[i] = f->mac_info; + ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr); + if (ret) { + PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter", + on ? "enable" : "disable"); + goto DONE; + } + i++; + } + + /* Override with new filter */ + for (i = 0; i < num; i++) { + mac_filter[i].filter_type = desired_filter; + ret = i40e_vsi_add_mac(vsi, &mac_filter[i]); + if (ret) { + PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter", + on ? 
"enable" : "disable"); + goto DONE; + } + } + +DONE: + rte_free(mac_filter); + return ret; +} + /* Configure vlan stripping on or off */ int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) @@ -3987,9 +4404,11 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev) { struct rte_eth_dev_data *data = dev->data; int ret; + int mask = 0; /* Apply vlan offload setting */ - i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; + i40e_vlan_offload_set(dev, mask); /* Apply double-vlan setting, not implemented yet */ @@ -4558,7 +4977,7 @@ i40e_pf_enable_irq0(struct i40e_hw *hw) } static void -i40e_pf_config_irq0(struct i40e_hw *hw) +i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue) { /* read pending request and disable first */ i40e_pf_disable_irq0(hw); @@ -4566,9 +4985,10 @@ i40e_pf_config_irq0(struct i40e_hw *hw) I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK); - /* Link no queues with irq0 */ - I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, - I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK); + if (no_queue) + /* Link no queues with irq0 */ + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK); } static void @@ -5422,11 +5842,11 @@ i40e_pf_disable_rss(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint64_t hena; - hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; hena &= ~I40E_RSS_HENA_ALL; - I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena); - I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); } @@ -5437,9 +5857,14 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); int ret = 0; - if (!key || key_len != ((I40E_PFQF_HKEY_MAX_INDEX + 1) * - sizeof(uint32_t))) + if (!key || key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); return -EINVAL; + } if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { struct i40e_aqc_get_set_rss_key_data *key_dw = @@ -5454,7 +5879,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) uint16_t i; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]); + i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]); I40E_WRITE_FLUSH(hw); } @@ -5483,7 +5908,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) uint16_t i; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - key_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i)); + key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); } *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); @@ -5504,12 +5929,12 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) return ret; rss_hf = rss_conf->rss_hf; - hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; hena &= ~I40E_RSS_HENA_ALL; hena |= i40e_config_hena(rss_hf); - I40E_WRITE_REG(hw, 
I40E_PFQF_HENA(0), (uint32_t)hena); - I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); return 0; @@ -5524,8 +5949,8 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev, uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; uint64_t hena; - hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -EINVAL; @@ -5549,8 +5974,8 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, &rss_conf->rss_key_len); - hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; rss_conf->rss_hf = i40e_parse_hena(hena); return 0; @@ -5575,6 +6000,12 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) case ETH_TUNNEL_FILTER_IMAC: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC; break; + case ETH_TUNNEL_FILTER_OIP: + *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP; + break; + case ETH_TUNNEL_FILTER_IIP: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP; + break; default: PMD_DRV_LOG(ERR, "invalid tunnel filter type"); return -EINVAL; @@ -5589,7 +6020,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, uint8_t add) { uint16_t ip_type; - uint8_t tun_type = 0; + uint8_t i, tun_type = 0; + /* internal varialbe to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; int val, ret = 0; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_vsi *vsi = pf->main_vsi; @@ -5606,32 +6039,36 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, } pfilter = cld_filter; - (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac, - sizeof(struct ether_addr)); - (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac, - sizeof(struct ether_addr)); + ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac); - pfilter->inner_vlan = tunnel_filter->inner_vlan; + pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan); if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; - (void)rte_memcpy(&pfilter->ipaddr.v4.data, - &tunnel_filter->ip_addr, + rte_memcpy(&pfilter->ipaddr.v4.data, + &rte_cpu_to_le_32(tunnel_filter->ip_addr.ipv4_addr), sizeof(pfilter->ipaddr.v4.data)); } else { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; - (void)rte_memcpy(&pfilter->ipaddr.v6.data, - &tunnel_filter->ip_addr, + for (i = 0; i < 4; i++) { + convert_ipv6[i] = + rte_cpu_to_le_32(tunnel_filter->ip_addr.ipv6_addr[i]); + } + rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6, sizeof(pfilter->ipaddr.v6.data)); } /* check tunneled type */ switch (tunnel_filter->tunnel_type) { case RTE_TUNNEL_TYPE_VXLAN: - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; break; case RTE_TUNNEL_TYPE_NVGRE: tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; break; + case RTE_TUNNEL_TYPE_IP_IN_GRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; + break; default: /* Other tunnel types is not 
supported. */ PMD_DRV_LOG(ERR, "tunnel type is not supported."); @@ -5646,10 +6083,11 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, return -EINVAL; } - pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type | - (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); - pfilter->tenant_id = tunnel_filter->tenant_id; - pfilter->queue_number = tunnel_filter->queue_id; + pfilter->flags |= rte_cpu_to_le_16( + I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | + ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); + pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id); if (add) ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1); @@ -5754,8 +6192,8 @@ i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port) /* Add UDP tunneling port */ static int -i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel) +i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) { int ret = 0; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -5785,8 +6223,8 @@ i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev, /* Remove UDP tunneling port */ static int -i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel) +i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) { int ret = 0; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -5909,13 +6347,13 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf, } if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) && - (is_zero_ether_addr(filter->outer_mac))) { + (is_zero_ether_addr(&filter->outer_mac))) { PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address"); return -EINVAL; } if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) && - (is_zero_ether_addr(filter->inner_mac))) { + (is_zero_ether_addr(&filter->inner_mac))) { PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address"); return -EINVAL; } @@ -6051,7 +6489,7 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf) static void i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) { - uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0); + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 
1 : 0; } @@ -6060,7 +6498,7 @@ i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) static void i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) { - uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0); + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); if (enable > 0) { if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { @@ -6077,7 +6515,7 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) } reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } - I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg); + i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg); I40E_WRITE_FLUSH(hw); } @@ -6095,7 +6533,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, enum i40e_filter_pctype pctype; memset(g_cfg, 0, sizeof(*g_cfg)); - reg = I40E_READ_REG(hw, I40E_GLQF_CTL); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); if (reg & I40E_GLQF_CTL_HTOEP_MASK) g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; else @@ -6110,7 +6548,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, /* Bit set indicats the coresponding flow type is supported */ g_cfg->valid_bit_mask[0] |= (1UL << i); pctype = i40e_flowtype_to_pctype(i); - reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype)); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype)); if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) g_cfg->sym_hash_enable_mask[0] |= (1UL << i); } @@ -6183,10 +6621,10 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, pctype = i40e_flowtype_to_pctype(i); reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); } - reg = I40E_READ_REG(hw, I40E_GLQF_CTL); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { /* Toeplitz */ if (reg & I40E_GLQF_CTL_HTOEP_MASK) { @@ -6207,7 +6645,7 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, /* Use the default, and keep it as it is */ goto out; - I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg); + i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg); out: I40E_WRITE_FLUSH(hw); @@ -6323,43 +6761,32 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, */ static const uint64_t valid_fdir_inset_table[] = { [I40E_FILTER_PCTYPE_FRAG_IPV4] = - I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST, [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | - I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = - I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | - I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST, [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | - I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_SCTP_VT, [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = - I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST, [I40E_FILTER_PCTYPE_FRAG_IPV6] = - I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = - I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | - I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = - 
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | - I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | - I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_SCTP_VT, [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = - I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | - I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, [I40E_FILTER_PCTYPE_L2_PAYLOAD] = - I40E_INSET_LAST_ETHER_TYPE | I40E_INSET_FLEX_PAYLOAD, + I40E_INSET_LAST_ETHER_TYPE, }; if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) @@ -6587,7 +7014,7 @@ i40e_translate_input_set_reg(uint64_t input) return val; } -static uint8_t +static int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) { uint8_t i, idx = 0; @@ -6605,16 +7032,13 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) if (!inset || !mask || !nb_elem) return 0; - if (!inset && nb_elem >= I40E_INSET_MASK_NUM_REG) { - for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) - mask[i] = 0; - return I40E_INSET_MASK_NUM_REG; - } for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { - if (idx >= nb_elem) - break; - if (inset & inset_mask_map[i].inset) { + if ((inset & inset_mask_map[i].inset) == inset_mask_map[i].inset) { + if (idx >= nb_elem) { + PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks"); + return -EINVAL; + } mask[idx] = inset_mask_map[i].mask; idx++; } @@ -6623,134 +7047,161 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) return idx; } -static uint64_t -i40e_get_reg_inset(struct i40e_hw *hw, enum rte_filter_type filter, - enum i40e_filter_pctype pctype) -{ - uint64_t reg = 0; - - if (filter == RTE_ETH_FILTER_HASH) { - reg = I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(1, pctype)); - reg <<= I40E_32_BIT_WIDTH; - reg |= I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(0, pctype)); - } else if (filter == RTE_ETH_FILTER_FDIR) { - reg = I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 1)); - reg <<= I40E_32_BIT_WIDTH; - reg |= I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 0)); - } - - return reg; -} - static void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { - uint32_t reg = I40E_READ_REG(hw, addr); + uint32_t reg = i40e_read_rx_ctl(hw, addr); PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg); if (reg != val) - I40E_WRITE_REG(hw, addr, val); + i40e_write_rx_ctl(hw, addr, val); PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr, - (uint32_t)I40E_READ_REG(hw, addr)); + (uint32_t)i40e_read_rx_ctl(hw, addr)); } -static int -i40e_set_hash_inset_mask(struct i40e_hw *hw, - enum i40e_filter_pctype pctype, - enum rte_filter_input_set_op op, - uint32_t *mask_reg, - uint8_t num) +static void +i40e_filter_input_set_init(struct i40e_pf *pf) { - uint32_t reg; - uint8_t i; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int num, i; - if (!mask_reg || num > RTE_ETH_INPUT_SET_SELECT) - return -EINVAL; + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { + if (!I40E_VALID_PCTYPE(pctype)) + continue; + input_set = i40e_get_default_input_set(pctype); - if (op == RTE_ETH_INPUT_SET_SELECT) { - for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) { - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - 0); - if (i >= num) - continue; + num = 
i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return; + inset_reg = i40e_translate_input_set_reg(input_set); + + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) { + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), mask_reg[i]); } - } else if (op == RTE_ETH_INPUT_SET_ADD) { - uint8_t j, count = 0; - - for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) { - reg = I40E_READ_REG(hw, I40E_GLQF_HASH_MSK(i, pctype)); - if (reg & I40E_GLQF_HASH_MSK_FIELD) - count++; + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) { + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + 0); + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + 0); } - if (count + num > I40E_INSET_MASK_NUM_REG) - return -EINVAL; + I40E_WRITE_FLUSH(hw); - for (i = count, j = 0; i < I40E_INSET_MASK_NUM_REG; i++, j++) - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - mask_reg[j]); + /* store the default input set */ + pf->hash_input_set[pctype] = input_set; + pf->fdir.input_set[pctype] = input_set; } - - return 0; } -static int -i40e_set_fd_inset_mask(struct i40e_hw *hw, - enum i40e_filter_pctype pctype, - enum rte_filter_input_set_op op, - uint32_t *mask_reg, - uint8_t num) +int +i40e_hash_filter_inset_select(struct i40e_hw *hw, + struct rte_eth_input_set_conf *conf) { - uint32_t reg; - uint8_t i; + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg = 0; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int ret, i, num; - if (!mask_reg || num > RTE_ETH_INPUT_SET_SELECT) + if (!conf) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + if (conf->op != RTE_ETH_INPUT_SET_SELECT && + conf->op != RTE_ETH_INPUT_SET_ADD) { + PMD_DRV_LOG(ERR, "Unsupported input set operation"); return -EINVAL; + } - if (op == RTE_ETH_INPUT_SET_SELECT) { - for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) { - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - 0); - if (i >= num) - continue; - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - } - } else if (op == RTE_ETH_INPUT_SET_ADD) { - uint8_t j, count = 0; - - for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) { - reg = I40E_READ_REG(hw, I40E_GLQF_FD_MSK(i, pctype)); - if (reg & I40E_GLQF_FD_MSK_FIELD) - count++; - } - if (count + num > I40E_INSET_MASK_NUM_REG) - return -EINVAL; + pctype = i40e_flowtype_to_pctype(conf->flow_type); + if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) { + PMD_DRV_LOG(ERR, "Not supported flow type (%u)", + conf->flow_type); + return -EINVAL; + } - for (i = count, j = 0; i < I40E_INSET_MASK_NUM_REG; i++, j++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[j]); + ret = i40e_parse_input_set(&input_set, pctype, conf->field, + conf->inset_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to parse input set"); + return -EINVAL; + } + if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH, + input_set) != 0) { + PMD_DRV_LOG(ERR, "Invalid input 
set"); + return -EINVAL; } + if (conf->op == RTE_ETH_INPUT_SET_ADD) { + /* get inset value in register */ + inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); + input_set |= pf->hash_input_set[pctype]; + } + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return -EINVAL; + + inset_reg |= i40e_translate_input_set_reg(input_set); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + 0); + I40E_WRITE_FLUSH(hw); + + pf->hash_input_set[pctype] = input_set; return 0; } int -i40e_filter_inset_select(struct i40e_hw *hw, - struct rte_eth_input_set_conf *conf, - enum rte_filter_type filter) +i40e_fdir_filter_inset_select(struct i40e_pf *pf, + struct rte_eth_input_set_conf *conf) { + struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_filter_pctype pctype; - uint64_t inset_reg = 0, input_set; - uint32_t mask_reg[I40E_INSET_MASK_NUM_REG]; - uint8_t num; - int ret; + uint64_t input_set, inset_reg = 0; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int ret, i, num; if (!hw || !conf) { PMD_DRV_LOG(ERR, "Invalid pointer"); return -EFAULT; } + if (conf->op != RTE_ETH_INPUT_SET_SELECT && + conf->op != RTE_ETH_INPUT_SET_ADD) { + PMD_DRV_LOG(ERR, "Unsupported input set operation"); + return -EINVAL; + } pctype = i40e_flowtype_to_pctype(conf->flow_type); if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) { @@ -6758,60 +7209,54 @@ i40e_filter_inset_select(struct i40e_hw *hw, conf->flow_type); return -EINVAL; } - if (filter != RTE_ETH_FILTER_HASH && filter != RTE_ETH_FILTER_FDIR) { - PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter); - return -EINVAL; - } - ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); if (ret) { PMD_DRV_LOG(ERR, "Failed to parse input set"); return -EINVAL; } - if (i40e_validate_input_set(pctype, filter, input_set) != 0) { + if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, + input_set) != 0) { PMD_DRV_LOG(ERR, "Invalid input set"); return -EINVAL; } - if (conf->op == RTE_ETH_INPUT_SET_ADD) { - inset_reg |= i40e_get_reg_inset(hw, filter, pctype); - } else if (conf->op != RTE_ETH_INPUT_SET_SELECT) { - PMD_DRV_LOG(ERR, "Unsupported input set operation"); - return -EINVAL; - } + /* get inset value in register */ + inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); + + /* Can not change the inset reg for flex payload for fdir, + * it is done by writing I40E_PRTQF_FD_FLXINSET + * in i40e_set_flex_mask_on_pctype. 
+ */ + if (conf->op == RTE_ETH_INPUT_SET_SELECT) + inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS; + else + input_set |= pf->fdir.input_set[pctype]; num = i40e_generate_inset_mask_reg(input_set, mask_reg, I40E_INSET_MASK_NUM_REG); - inset_reg |= i40e_translate_input_set_reg(input_set); - - if (filter == RTE_ETH_FILTER_HASH) { - ret = i40e_set_hash_inset_mask(hw, pctype, conf->op, mask_reg, - num); - if (ret) - return -EINVAL; + if (num < 0) + return -EINVAL; - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); - } else if (filter == RTE_ETH_FILTER_FDIR) { - ret = i40e_set_fd_inset_mask(hw, pctype, conf->op, mask_reg, - num); - if (ret) - return -EINVAL; + inset_reg |= i40e_translate_input_set_reg(input_set); - i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); - } else { - PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter); - return -EINVAL; - } + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + 0); I40E_WRITE_FLUSH(hw); + pf->fdir.input_set[pctype] = input_set; return 0; } @@ -6863,9 +7308,8 @@ i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) &(info->info.global_conf)); break; case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: - ret = i40e_filter_inset_select(hw, - &(info->info.input_set_conf), - RTE_ETH_FILTER_HASH); + ret = i40e_hash_filter_inset_select(hw, + &(info->info.input_set_conf)); break; default: @@ -7039,16 +7483,64 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev, return ret; } +/* + * Check and enable Extended Tag. + * Enabling Extended Tag is important for 40G performance. + */ +static void +i40e_enable_extended_tag(struct rte_eth_dev *dev) +{ + uint32_t buf = 0; + int ret; + + ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CAP_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CAP_REG); + return; + } + if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) { + PMD_DRV_LOG(ERR, "Does not support Extended Tag"); + return; + } + + buf = 0; + ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } + if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) { + PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled"); + return; + } + buf |= PCI_DEV_CTRL_EXT_TAG_MASK; + ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } +} + /* * As some registers wouldn't be reset unless a global hardware reset, * hardware initialization is needed to put those registers into an * expected initial state. 
*/ static void -i40e_hw_init(struct i40e_hw *hw) +i40e_hw_init(struct rte_eth_dev *dev) { + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + i40e_enable_extended_tag(dev); + /* clear the PF Queue Filter control register */ - I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0); + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0); /* Disable symmetric hash per port */ i40e_set_symmetric_hash_enable_per_port(hw, 0); @@ -7549,17 +8041,61 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id) return 0; } -static int -i40e_timesync_enable(struct rte_eth_dev *dev) +static uint64_t +i40e_read_systime_cyclecounter(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link *link = &dev->data->dev_link; - uint32_t tsync_ctl_l; - uint32_t tsync_ctl_h; + uint64_t systim_cycles; + + systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L); + systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H) + << 32; + + return systim_cycles; +} + +static uint64_t +i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp; + + rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index)); + rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index)) + << 32; + + return rx_tstamp; +} + +static uint64_t +i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp; + + tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L); + tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H) + << 32; + + return tx_tstamp; +} + +static void +i40e_start_timecounters(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + struct rte_eth_link link; uint32_t tsync_inc_l; uint32_t tsync_inc_h; - switch (link->link_speed) { + /* Get current link speed. */ + memset(&link, 0, sizeof(link)); + i40e_dev_link_update(dev, 1); + rte_i40e_dev_atomic_read_link_status(dev, &link); + + switch (link.link_speed) { case ETH_LINK_SPEED_40G: tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF; tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32; @@ -7577,19 +8113,95 @@ i40e_timesync_enable(struct rte_eth_dev *dev) tsync_inc_h = 0x0; } - /* Clear timesync registers. */ - I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0); - I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H); - I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(0)); - I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(1)); - I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(2)); - I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(3)); - I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H); - /* Set the timesync increment value. 
*/ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l); I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h); + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->systime_tc.cc_shift = 0; + adapter->systime_tc.nsec_mask = 0; + + adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->rx_tstamp_tc.cc_shift = 0; + adapter->rx_tstamp_tc.nsec_mask = 0; + + adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->tx_tstamp_tc.cc_shift = 0; + adapter->tx_tstamp_tc.nsec_mask = 0; +} + +static int +i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + systime_cycles = i40e_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +i40e_timesync_enable(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl_l; + uint32_t tsync_ctl_h; + + /* Stop the timesync system time. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0); + /* Reset the timesync system time value. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0); + + i40e_start_timecounters(dev); + + /* Clear timesync registers. */ + I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0); + I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3)); + /* Enable timestamping of PTP packets. */ tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0); tsync_ctl_l |= I40E_PRTTSYN_TSYNENA; @@ -7621,7 +8233,7 @@ i40e_timesync_disable(struct rte_eth_dev *dev) I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l); I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h); - /* Set the timesync increment value. */ + /* Reset the timesync increment value. 
*/ I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0); I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0); @@ -7633,22 +8245,23 @@ i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp, uint32_t flags) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + uint32_t sync_status; - uint32_t rx_stmpl; - uint32_t rx_stmph; uint32_t index = flags & 0x03; + uint64_t rx_tstamp_cycles; + uint64_t ns; sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1); if ((sync_status & (1 << index)) == 0) return -EINVAL; - rx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index)); - rx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index)); - - timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl); - timestamp->tv_nsec = 0; + rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); - return 0; + return 0; } static int @@ -7656,21 +8269,22 @@ i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + uint32_t sync_status; - uint32_t tx_stmpl; - uint32_t tx_stmph; + uint64_t tx_tstamp_cycles; + uint64_t ns; sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0); if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0) return -EINVAL; - tx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L); - tx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H); - - timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl); - timestamp->tv_nsec = 0; + tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); - return 0; + return 0; } /* @@ -7733,78 +8347,18 @@ i40e_parse_dcb_configure(struct rte_eth_dev *dev, return 0; } -/* - * i40e_vsi_get_bw_info - Query VSI BW Information - * @vsi: the VSI being queried - * - * Returns 0 on success, negative value on failure - */ -static int -i40e_vsi_get_bw_info(struct i40e_vsi *vsi) -{ - struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; - struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; - struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); - int i, ret; - uint32_t tc_bw_max; - /* Get the VSI level BW configuration */ - ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); - if (ret) { - PMD_INIT_LOG(ERR, - "couldn't get PF vsi bw config, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); - return -EINVAL; - } - - /* Get the VSI level BW configuration per TC */ - ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, - NULL); - if (ret) { - PMD_INIT_LOG(ERR, - "couldn't get PF vsi ets bw config, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); - return -EINVAL; - } - - if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { - PMD_INIT_LOG(WARNING, - "Enabled TCs mismatch from querying VSI BW info" - " 0x%08x 0x%08x\n", bw_config.tc_valid_bits, - bw_ets_config.tc_valid_bits); - /* Still continuing */ - } - - vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit); - vsi->bw_info.bw_max_quanta = bw_config.max_bw; - tc_bw_max = rte_le_to_cpu_16(bw_ets_config.tc_bw_max[0]) | - 
(rte_le_to_cpu_16(bw_ets_config.tc_bw_max[1]) << 16); - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - vsi->bw_info.bw_ets_share_credits[i] = - bw_ets_config.share_credits[i]; - vsi->bw_info.bw_ets_limit_credits[i] = - rte_le_to_cpu_16(bw_ets_config.credits[i]); - /* 3 bits out of 4 for each TC */ - vsi->bw_info.bw_ets_max_quanta[i] = - (uint8_t)((tc_bw_max >> (i * 4)) & 0x7); - PMD_INIT_LOG(DEBUG, - "%s: vsi seid = %d, TC = %d, qset = 0x%x\n", - __func__, vsi->seid, i, bw_config.qs_handles[i]); - } - - return 0; -} - -static int +static enum i40e_status_code i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, struct i40e_aqc_vsi_properties_data *info, uint8_t enabled_tcmap) { - int ret, i, total_tc = 0; + enum i40e_status_code ret; + int i, total_tc = 0; uint16_t qpnum_per_tc, bsf, qp_idx; struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + uint16_t used_queues; ret = validate_tcmap_parameter(vsi, enabled_tcmap); if (ret != I40E_SUCCESS) @@ -7818,7 +8372,18 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, total_tc = 1; vsi->enabled_tc = enabled_tcmap; - qpnum_per_tc = dev_data->nb_rx_queues / total_tc; + /* different VSI has different queues assigned */ + if (vsi->type == I40E_VSI_MAIN) + used_queues = dev_data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else if (vsi->type == I40E_VSI_VMDQ2) + used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else { + PMD_INIT_LOG(ERR, "unsupported VSI type."); + return I40E_ERR_NO_AVAILABLE_VSI; + } + + qpnum_per_tc = used_queues / total_tc; /* Number of queues per enabled TC */ if (qpnum_per_tc == 0) { PMD_INIT_LOG(ERR, " number of queues is less that tcs."); @@ -7862,6 +8427,93 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, return I40E_SUCCESS; } +/* + * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map + * @veb: VEB to be configured + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw; + struct i40e_aqc_query_switching_comp_bw_config_resp bw_query; + struct i40e_aqc_query_switching_comp_ets_config_resp ets_query; + struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi); + enum i40e_status_code ret = I40E_SUCCESS; + int i; + uint32_t bw_max; + + /* Check if enabled_tc is same as existing or new TCs */ + if (veb->enabled_tc == tc_map) + return ret; + + /* configure tc bandwidth */ + memset(&veb_bw, 0, sizeof(veb_bw)); + veb_bw.tc_valid_bits = tc_map; + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT_ULL(i)) + veb_bw.tc_bw_share_credits[i] = 1; + } + ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, + &veb_bw, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation" + " per TC failed = %d", + hw->aq.asq_last_status); + return ret; + } + + memset(&ets_query, 0, sizeof(ets_query)); + ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, + &ets_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS" + " configuration %u", hw->aq.asq_last_status); + return ret; + } + memset(&bw_query, 0, sizeof(bw_query)); + ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, + &bw_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth" + " 
configuration %u", hw->aq.asq_last_status); + return ret; + } + + /* store and print out BW info */ + veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit); + veb->bw_info.bw_max = ets_query.tc_bw_max; + PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) | + (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + veb->bw_info.bw_ets_share_credits[i] = + bw_query.tc_bw_share_credits[i]; + veb->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(bw_query.tc_bw_limits[i]); + /* 4 bits per TC, 4th bit is reserved */ + veb->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + RTE_LEN2MASK(3, uint8_t)); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i, + veb->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i, + veb->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i, + veb->bw_info.bw_ets_max[i]); + } + + veb->enabled_tc = tc_map; + + return ret; +} + + /* * i40e_vsi_config_tc - Configure VSI tc setting for given TC map * @vsi: VSI to be configured @@ -7869,13 +8521,13 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, * * Returns 0 on success, negative value on failure */ -static int -i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map) +static enum i40e_status_code +i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; struct i40e_vsi_context ctxt; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); - int ret = 0; + enum i40e_status_code ret = I40E_SUCCESS; int i; /* Check if enabled_tc is same as existing or new TCs */ @@ -7928,8 +8580,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map) vsi->info.mapping_flags = ctxt.info.mapping_flags; vsi->info.valid_sections = 0; - /* Update current VSI BW information */ - ret = i40e_vsi_get_bw_info(vsi); + /* query and update current VSI BW information */ + ret = i40e_vsi_get_bw_config(vsi); if (ret) { PMD_INIT_LOG(ERR, "Failed updating vsi bw info, err %s aq_err %s", @@ -7961,11 +8613,13 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config; struct i40e_vsi *main_vsi = pf->main_vsi; struct i40e_vsi_list *vsi_list; - int i, ret; + enum i40e_status_code ret; + int i; uint32_t val; /* Use the FW API if FW > v4.4*/ - if (!((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4))) { + if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) || + (hw->aq.fw_maj_ver >= 5))) { PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API" " to configure DCB"); return I40E_ERR_FIRMWARE_API_VERSION; @@ -8010,15 +8664,27 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); + /* if Veb is created, need to update TC of it at first */ + if (main_vsi->veb) { + ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); + if (ret) + PMD_INIT_LOG(WARNING, + "Failed configuring TC for VEB seid=%d\n", + main_vsi->veb->seid); + } /* Update each VSI */ i40e_vsi_config_tc(main_vsi, tc_map); if (main_vsi->veb) { TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) { - /* Beside main VSI, only enable default + /* Beside main VSI and VMDQ VSIs, only enable default * TC for other VSIs */ - ret = i40e_vsi_config_tc(vsi_list->vsi, - I40E_DEFAULT_TCMAP); + if (vsi_list->vsi->type == I40E_VSI_VMDQ2) + ret = 
i40e_vsi_config_tc(vsi_list->vsi, + tc_map); + else + ret = i40e_vsi_config_tc(vsi_list->vsi, + I40E_DEFAULT_TCMAP); if (ret) PMD_INIT_LOG(WARNING, "Failed configuring TC for VSI seid=%d\n", @@ -8138,9 +8804,8 @@ i40e_dcb_setup(struct rte_eth_dev *dev) return -ENOTSUP; } - if (pf->vf_num != 0 || - (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)) - PMD_INIT_LOG(DEBUG, " DCB only works on main vsi."); + if (pf->vf_num != 0) + PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis."); ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map); if (ret) { @@ -8152,6 +8817,7 @@ i40e_dcb_setup(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "dcb sw configure fails"); return -ENOSYS; } + return 0; } @@ -8164,7 +8830,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, struct i40e_vsi *vsi = pf->main_vsi; struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config; uint16_t bsf, tc_mapping; - int i; + int i, j; if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1); @@ -8175,21 +8841,177 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, for (i = 0; i < dcb_info->nb_tcs; i++) dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i]; - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (vsi->enabled_tc & (1 << i)) { + j = 0; + do { + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); /* only main vsi support multi TCs */ - dcb_info->tc_queue.tc_rxq[0][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; - dcb_info->tc_queue.tc_txq[0][i].base = - dcb_info->tc_queue.tc_rxq[0][i].base; + dcb_info->tc_queue.tc_txq[j][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base; bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; - dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf; - dcb_info->tc_queue.tc_txq[0][i].nb_queue = - dcb_info->tc_queue.tc_rxq[0][i].nb_queue; + dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf; + dcb_info->tc_queue.tc_txq[j][i].nb_queue = + dcb_info->tc_queue.tc_rxq[j][i].nb_queue; } + vsi = pf->vmdq[j].vsi; + j++; + } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL)); + return 0; +} + +static int +i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t interval = + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (interval << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + else + I40E_WRITE_REG(hw, + I40E_PFINT_DYN_CTLN(msix_intr - + I40E_RX_VEC_START), + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (interval << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + + I40E_WRITE_FLUSH(hw); + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static int +i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = 
intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + else + I40E_WRITE_REG(hw, + I40E_PFINT_DYN_CTLN(msix_intr - + I40E_RX_VEC_START), + 0); + I40E_WRITE_FLUSH(hw); + + return 0; +} + +static int i40e_get_reg_length(__rte_unused struct rte_eth_dev *dev) +{ + /* Highest base addr + 32-bit word */ + return I40E_GLGEN_STAT_CLEAR + 4; +} + +static int i40e_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *ptr_data = regs->data; + uint32_t reg_idx, arr_idx, arr_idx2, reg_offset; + const struct i40e_reg_info *reg_info; + + /* The first few registers have to be read using AQ operations */ + reg_idx = 0; + while (i40e_regs_adminq[reg_idx].name) { + reg_info = &i40e_regs_adminq[reg_idx++]; + for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++) + for (arr_idx2 = 0; + arr_idx2 <= reg_info->count2; + arr_idx2++) { + reg_offset = arr_idx * reg_info->stride1 + + arr_idx2 * reg_info->stride2; + ptr_data[reg_offset >> 2] = + i40e_read_rx_ctl(hw, reg_offset); + } } + + /* The remaining registers can be read using primitives */ + reg_idx = 0; + while (i40e_regs_others[reg_idx].name) { + reg_info = &i40e_regs_others[reg_idx++]; + for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++) + for (arr_idx2 = 0; + arr_idx2 <= reg_info->count2; + arr_idx2++) { + reg_offset = arr_idx * reg_info->stride1 + + arr_idx2 * reg_info->stride2; + ptr_data[reg_offset >> 2] = + I40E_READ_REG(hw, reg_offset); + } + } + return 0; } + +static int i40e_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Convert word count to byte count */ + return hw->nvm.sr_size << 1; +} + +static int i40e_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t *data = eeprom->data; + uint16_t offset, length, cnt_words; + int ret_code; + + offset = eeprom->offset >> 1; + length = eeprom->length >> 1; + cnt_words = length; + + if (offset > hw->nvm.sr_size || + offset + length > hw->nvm.sr_size) { + PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range."); + return -EINVAL; + } + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data); + if (ret_code != I40E_SUCCESS || cnt_words != length) { + PMD_DRV_LOG(ERR, "EEPROM read failed."); + return -EIO; + } + + return 0; +} + +static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!is_valid_assigned_ether_addr(mac_addr)) { + PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); + return; + } + + /* Flags: 0x3 updates port address */ + i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL); +}
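
The udp_tunnel_add/del callbacks are renamed above to the _port_ variants. From the application side, the VXLAN port registration that ends up in i40e_add_vxlan_port() looks roughly like the sketch below; it assumes the correspondingly renamed rte_eth_dev_udp_tunnel_port_add() of this release, and example_add_vxlan_port() is an illustrative helper, not part of the patch.

#include <rte_ethdev.h>

/* Register UDP port 4789 as a VXLAN port so the cloud filters added
 * through i40e_dev_tunnel_filter_set() can match VXLAN traffic. */
static int
example_add_vxlan_port(uint8_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}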
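The timesync rework above replaces direct PRTTSYN register snapshots with software timecounters from rte_time.h, so adjust/read/write only touch a nanosecond accumulator while the hardware clock keeps free-running. A minimal sketch of that conversion path, assuming only the rte_timecounter fields the patch uses; the cycle value would come from i40e_read_systime_cyclecounter(), and the function names here are illustrative.

#include <stdint.h>
#include <time.h>
#include <rte_time.h>

/* 64-bit free-running cycle counter -> nanoseconds, as configured for
 * systime_tc, rx_tstamp_tc and tx_tstamp_tc in i40e_start_timecounters(). */
static struct rte_timecounter systime_tc = {
	.cc_mask = UINT64_MAX,	/* I40E_CYCLECOUNTER_MASK */
	.cc_shift = 0,
	.nsec_mask = 0,
};

static void
example_read_time(uint64_t systime_cycles, struct timespec *ts)
{
	uint64_t ns;

	/* Accumulates the delta since the previous call into
	 * systime_tc.nsec and returns the running nanosecond count. */
	ns = rte_timecounter_update(&systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);
}

static void
example_adjust_time(int64_t delta)
{
	/* Like i40e_timesync_adjust_time(): only the software counter
	 * moves, the PRTTSYN_INC registers stay untouched. */
	systime_tc.nsec += delta;
}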
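The new i40e_dev_rx_queue_intr_enable()/_disable() callbacks back the generic Rx interrupt control API, letting an application sleep on a queue and re-arm its vector per wakeup. A short caller-side sketch, assuming a port already configured with intr_conf.rxq enabled and an epoll-style wait loop; example_rx_intr_cycle() is illustrative only.

#include <rte_ethdev.h>

/* Re-arm the queue's vector before waiting, then mask it again while
 * the burst loop drains the ring. */
static void
example_rx_intr_cycle(uint8_t port_id, uint16_t queue_id)
{
	/* Maps to i40e_dev_rx_queue_intr_enable(): writes the queue's
	 * PFINT_DYN_CTLN/CTL0 register and calls rte_intr_enable(). */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* ... block in the event loop until the vector fires ... */

	/* Maps to i40e_dev_rx_queue_intr_disable(): clears the vector's
	 * dynamic control register so polling proceeds undisturbed. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}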
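The get_reg_length/get_regs/get_eeprom_length/get_eeprom callbacks added above plug into the ethdev register and EEPROM dump API used by the ethtool-style helpers. A minimal caller-side sketch, assuming the generic rte_eth_dev_get_* functions of this release; buffer handling is simplified and the function name is illustrative.

#include <stdlib.h>
#include <rte_ethdev.h>
#include <rte_dev_info.h>

static int
example_dump_regs_and_eeprom(uint8_t port_id)
{
	struct rte_dev_reg_info regs = { .data = NULL };
	struct rte_dev_eeprom_info eeprom = { .data = NULL };
	int len;

	/* i40e_get_reg_length() reports the highest register offset plus
	 * one 32-bit word, i.e. the byte size of the snapshot that
	 * i40e_get_regs() fills in. */
	len = rte_eth_dev_get_reg_length(port_id);
	if (len <= 0)
		return -1;
	regs.data = calloc(1, len);
	if (regs.data == NULL)
		return -1;
	if (rte_eth_dev_get_reg_info(port_id, &regs) != 0)
		goto err;

	/* EEPROM length is hw->nvm.sr_size words, reported in bytes. */
	len = rte_eth_dev_get_eeprom_length(port_id);
	if (len <= 0)
		goto err;
	eeprom.offset = 0;
	eeprom.length = len;
	eeprom.data = calloc(1, len);
	if (eeprom.data == NULL)
		goto err;
	if (rte_eth_dev_get_eeprom(port_id, &eeprom) != 0)
		goto err;

	free(regs.data);
	free(eeprom.data);
	return 0;
err:
	free(regs.data);
	free(eeprom.data);
	return -1;
}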