#define HNS3_PF_FUNC_ID 0
#define HNS3_1ST_VF_FUNC_ID 1
+#define HNS3_SW_SHIFT_AND_DISCARD_MODE 0
+#define HNS3_HW_SHIFT_AND_DISCARD_MODE 1
+
#define HNS3_UC_MACADDR_NUM 128
#define HNS3_VF_UC_MACADDR_NUM 48
#define HNS3_MC_MACADDR_NUM 128
#define HNS3_HIP08_MIN_TX_PKT_LEN 33
#define HNS3_HIP09_MIN_TX_PKT_LEN 9
+#define HNS3_BITS_PER_BYTE 8
+
#define HNS3_4_TCS 4
#define HNS3_8_TCS 8
};
struct hns3_tc_queue_info {
- uint8_t tqp_offset; /* TQP offset from base TQP */
- uint8_t tqp_count; /* Total TQPs */
- uint8_t tc; /* TC index */
+ uint16_t tqp_offset; /* TQP offset from base TQP */
+ uint16_t tqp_count; /* Total TQPs */
+ uint8_t tc; /* TC index */
bool enable; /* If this TC is enable or not */
};
* Kernel PF driver use mailbox to inform DPDK VF to do reset, the value
* of the reset level and the one defined in kernel driver should be
* same.
+ *
+ * According to the protocol of PCIe, FLR to a PF resets the PF state as
+ * well as the SR-IOV extended capability including VF Enable which
+ * means that VFs no longer exist.
+ *
+ * In PF FLR, the register state of VF is not reliable, VF's driver
+ * should not access the registers of the VF device.
*/
HNS3_VF_FULL_RESET = 3,
HNS3_FLR_RESET, /* A VF perform FLR reset */
uint8_t gl_unit;
};
+#define HNS3_TSO_SW_CAL_PSEUDO_H_CSUM 0
+#define HNS3_TSO_HW_CAL_PSEUDO_H_CSUM 1
+
struct hns3_hw {
struct rte_eth_dev_data *data;
void *io_base;
uint16_t tqps_num; /* num task queue pairs of this function */
uint16_t intr_tqps_num; /* num queue pairs mapping interrupt */
uint16_t rss_size_max; /* HW defined max RSS task queue */
+ uint16_t rx_buf_len; /* hold min hardware rx buf len */
uint16_t num_tx_desc; /* desc num of per tx queue */
uint16_t num_rx_desc; /* desc num of per rx queue */
uint32_t mng_entry_num; /* number of manager table entry */
uint32_t min_tx_pkt_len;
struct hns3_queue_intr intr;
-
+ /*
+ * tso mode.
+ * value range:
+ * HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ *
+ * - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
+ *  In this mode, because of the hardware constraint, the network driver
+ *  software needs to erase the L4 len value of the TCP pseudo header
+ * and recalculate the TCP pseudo header checksum of packets that
+ * need TSO.
+ *
+ * - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
+ *  In this mode, the hardware supports recalculating the TCP pseudo
+ *  header checksum of packets that need TSO, so the network driver
+ *  software does not need to recalculate it.
+ */
+ uint8_t tso_mode;
+ /*
+ * vlan mode.
+ * value range:
+ *  HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE
+ *
+ * - HNS3_SW_SHIFT_AND_DISCARD_MODE
+ * For some versions of hardware network engine, because of the
+ * hardware limitation, PMD driver needs to detect the PVID status
+ *    to work with hardware to implement PVID-related functions. For
+ *    example, the driver needs to discard the stripped PVID tag to ensure
+ *    the PVID is not reported to the mbuf, and shift the inserted VLAN tag
+ * to avoid port based VLAN covering it.
+ *
+ * - HNS3_HW_SHIFT_AND_DISCARD_MODE
+ * PMD driver does not need to process PVID-related functions in
+ * I/O process, Hardware will adjust the sequence between port based
+ * VLAN tag and BD VLAN tag automatically and VLAN tag stripped by
+ * PVID will be invisible to driver. And in this mode, hns3 is able
+ * to send multi-layer VLAN packets when hw VLAN insert offload
+ * is enabled.
+ */
+ uint8_t vlan_mode;
uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
struct hns3_port_base_vlan_config port_base_vlan_cfg;
/* Vlan tag configuration for RX direction */
struct hns3_rx_vtag_cfg {
- uint8_t rx_vlan_offload_en; /* Whether enable rx vlan offload */
- uint8_t strip_tag1_en; /* Whether strip inner vlan tag */
- uint8_t strip_tag2_en; /* Whether strip outer vlan tag */
- uint8_t vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
- uint8_t vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
+ bool rx_vlan_offload_en; /* Whether enable rx vlan offload */
+ bool strip_tag1_en; /* Whether strip inner vlan tag */
+ bool strip_tag2_en; /* Whether strip outer vlan tag */
+ /*
+ * If strip_tag_en is enabled, this bit decides whether to map the vlan
+ * tag to descriptor.
+ */
+ bool strip_tag1_discard_en;
+ bool strip_tag2_discard_en;
+ /*
+ * If this bit is enabled, only map inner/outer priority to descriptor
+ * and the vlan tag is always 0.
+ */
+ bool vlan1_vlan_prionly;
+ bool vlan2_vlan_prionly;
};
/* Vlan tag configuration for TX direction */
bool accept_untag1; /* Whether accept untag1 packet from host */
bool accept_tag2;
bool accept_untag2;
- bool insert_tag1_en; /* Whether insert inner vlan tag */
- bool insert_tag2_en; /* Whether insert outer vlan tag */
- uint16_t default_tag1; /* The default inner vlan tag to insert */
- uint16_t default_tag2; /* The default outer vlan tag to insert */
+ bool insert_tag1_en; /* Whether insert outer vlan tag */
+ bool insert_tag2_en; /* Whether insert inner vlan tag */
+ /*
+ * In shift mode, hw will shift the sequence of port based VLAN and
+ * BD VLAN.
+ */
+ bool tag_shift_mode_en; /* hw shift vlan tag automatically */
+ uint16_t default_tag1; /* The default outer vlan tag to insert */
+ uint16_t default_tag2; /* The default inner vlan tag to insert */
};
struct hns3_vtag_cfg {
/* Key string for IPC. */
#define HNS3_MP_NAME "net_hns3_mp"
+#define HNS3_L2TBL_NUM 4
+#define HNS3_L3TBL_NUM 16
+#define HNS3_L4TBL_NUM 16
+#define HNS3_OL3TBL_NUM 16
+#define HNS3_OL4TBL_NUM 16
+
+/*
+ * Lookup tables translating the hardware-reported L2/L3/L4 parse indexes
+ * (outer, inner and tunnel-outer) into 32-bit packet type values.
+ * NOTE(review): presumably the values stored are RTE_PTYPE_* masks filled
+ * by the Rx setup path — confirm against the code that populates them.
+ */
+struct hns3_ptype_table {
+	uint32_t l2table[HNS3_L2TBL_NUM];
+	uint32_t l3table[HNS3_L3TBL_NUM];
+	uint32_t l4table[HNS3_L4TBL_NUM];
+	uint32_t inner_l2table[HNS3_L2TBL_NUM];
+	uint32_t inner_l3table[HNS3_L3TBL_NUM];
+	uint32_t inner_l4table[HNS3_L4TBL_NUM];
+	uint32_t ol3table[HNS3_OL3TBL_NUM];
+	uint32_t ol4table[HNS3_OL4TBL_NUM];
+};
+
+#define HNS3_FIXED_MAX_TQP_NUM_MODE 0
+#define HNS3_FLEX_MAX_TQP_NUM_MODE 1
+
struct hns3_pf {
struct hns3_adapter *adapter;
bool is_main_pf;
uint16_t func_num; /* num functions of this pf, include pf and vfs */
+ /*
+ * tqp_config mode
+ * tqp_config_mode value range:
+ * HNS3_FIXED_MAX_TQP_NUM_MODE,
+ * HNS3_FLEX_MAX_TQP_NUM_MODE
+ *
+ * - HNS3_FIXED_MAX_TQP_NUM_MODE
+ *    There is a limitation on the number of pf interrupts available
+ *    on some versions of network engines. In this case, the maximum
+ * queue number of pf can not be greater than the interrupt number,
+ * such as pf of network engine with revision_id 0x21. So the maximum
+ * number of queues must be fixed.
+ *
+ * - HNS3_FLEX_MAX_TQP_NUM_MODE
+ *    In this mode, the maximum queue number of pf has no constraint
+ * and comes from the macro RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF
+ * in the config file. Users can modify the macro according to their
+ * own application scenarios, which is more flexible to use.
+ */
+ uint8_t tqp_config_mode;
+
uint32_t pkt_buf_size; /* Total pf buf size for tx/rx */
uint32_t tx_buf_size; /* Tx buffer size for each TC */
uint32_t dv_buf_size; /* Dv buffer size for each TC */
struct hns3_err_msix_intr_stats abn_int_stats;
bool support_sfp_query;
+ uint32_t fec_mode; /* current FEC mode for ethdev */
struct hns3_vtag_cfg vtag_config;
LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list;
struct hns3_pf pf;
struct hns3_vf vf;
};
+
+ bool rx_simple_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
+
+ struct hns3_ptype_table ptype_tbl __rte_cache_min_aligned;
};
#define HNS3_DEV_SUPPORT_DCB_B 0x0
#define HNS3_DEV_SUPPORT_COPPER_B 0x1
#define HNS3_DEV_SUPPORT_UDP_GSO_B 0x2
-#define HNS3_DEV_SUPPORT_ADQ_B 0x3
+#define HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B 0x3
#define HNS3_DEV_SUPPORT_PTP_B 0x4
#define HNS3_DEV_SUPPORT_TX_PUSH_B 0x5
#define HNS3_DEV_SUPPORT_INDEP_TXRX_B 0x6
#define hns3_dev_udp_gso_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_UDP_GSO_B)
-/* Support Application Device Queue */
-#define hns3_dev_adq_supported(hw) \
- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_ADQ_B)
+/* Support the queue region action rule of flow directory */
+#define hns3_dev_fd_queue_region_supported(hw) \
+ hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B)
/* Support PTP timestamp offload */
#define hns3_dev_ptp_supported(hw) \
#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(x) (1ULL << (x))
+
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) \
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define max_t(type, x, y) ({ \
- type __max1 = (x); \
- type __max2 = (y); \
- __max1 > __max2 ? __max1 : __max2; })
-
+/*
+ * Because the hns3 network engine hardware always accesses registers in
+ * little-endian mode, the driver should call rte_cpu_to_le_32 to convert
+ * data to little-endian mode before writing to a register, and call
+ * rte_le_to_cpu_32 to convert data after reading from a register.
+ *
+ * Here the driver encapsulates the data conversion operation in the register
+ * read/write operation function as below:
+ * hns3_write_reg
+ * hns3_write_reg_opt
+ * hns3_read_reg
+ * Therefore, when calling these functions, conversion is not required again.
+ */
static inline void hns3_write_reg(void *base, uint32_t reg, uint32_t value)
{
- rte_write32(value, (volatile void *)((char *)base + reg));
+ rte_write32(rte_cpu_to_le_32(value),
+ (volatile void *)((char *)base + reg));
+}
+
+/*
+ * The optimized function for writing registers used in the '.rx_pkt_burst' and
+ * '.tx_pkt_burst' ops implementation function.
+ */
+static inline void hns3_write_reg_opt(volatile void *addr, uint32_t value)
+{
+	/*
+	 * The I/O write barrier makes all prior memory writes visible to the
+	 * device before the relaxed (non-ordered) register write below, so
+	 * the cheaper rte_write32_relaxed can be used on the hot path.
+	 */
+	rte_io_wmb();
+	rte_write32_relaxed(rte_cpu_to_le_32(value), addr);
+}
static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
{
- return rte_read32((volatile void *)((char *)base + reg));
+ uint32_t read_val = rte_read32((volatile void *)((char *)base + reg));
+ return rte_le_to_cpu_32(read_val);
}
#define hns3_write_dev(a, reg, value) \