static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
- /*
- * Force global configuration for flexible payload
- * to the first 16 bytes of the corresponding L2/L3/L4 paylod.
- * This should be removed from code once proper
- * configuration API is added to avoid configuration conflicts
- * between ports of the same device.
- */
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
-
/*
* Initialize registers for parsing packet type of QinQ
* This should be removed from code once proper
/*
* To work around the NVM issue, initialize registers
- * for flexible payload and packet type of QinQ by
- * software. It should be removed once issues are fixed
- * in NVM.
+	 * for the packet type of QinQ by software.
+	 * This workaround should be removed once the issue is fixed in NVM.
*/
i40e_GLQF_reg_init(hw);
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+
+	/* Disable flexible payload by default in the global configuration */
+ i40e_flex_payload_reg_set_default(hw);
+
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
}
}
+void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
+{
+	/*
+	 * Clear the GLQF_ORT override registers (33-35) that control
+	 * flexible-payload extraction for the L2/L3/L4 layers, i.e.
+	 * disable flexible payload in the global configuration.
+	 * NOTE(review): GLQF_ORT is a device-global register, so this
+	 * presumably affects all ports of the device — confirm against
+	 * the datasheet before relying on per-port semantics.
+	 */
+	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
+	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
+	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
i40e_res_pool_destroy(&pf->qp_pool);
i40e_res_pool_destroy(&pf->msix_pool);
+ /* Disable flexible payload in global configuration */
+ i40e_flex_payload_reg_set_default(hw);
+
/* force a PF reset to clean anything leftover */
reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
- uint32_t flx_pit;
+ uint32_t flx_pit, flx_ort;
uint16_t num, min_next_off; /* in words */
uint8_t field_idx = 0;
uint8_t layer_idx = 0;
layer_idx = I40E_FLXPLD_L4_IDX;
memset(flex_pit, 0, sizeof(flex_pit));
- num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
+ num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
+ RTE_DIM(flex_pit));
- for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
+ if (num) {
+ flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+ (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+ (layer_idx * I40E_MAX_FLXPLD_FIED);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ }
+
+ for (i = 0; i < num; i++) {
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
/* record the info in fdir structure */
pf->fdir.flex_set[field_idx].src_offset =
uint8_t raw_id)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t flx_pit;
+ uint32_t flx_pit, flx_ort;
uint8_t field_idx;
uint16_t min_next_off = 0; /* in words */
uint8_t i;
+ if (raw_id) {
+ flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+ (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+ (layer_idx * I40E_MAX_FLXPLD_FIED);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ }
+
/* Set flex pit */
for (i = 0; i < raw_id; i++) {
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;