diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 8f0a63b72e..0d3a83fa40 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -37,18 +37,6 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
-#ifdef PF_DRIVER
-/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
-	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
-		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
-}
-
-#endif /* PF_DRIVER */
 /**
  * i40e_adminq_init_regs - Initialize AdminQ registers
  * @hw: pointer to the hardware structure
@@ -69,6 +57,7 @@ STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
 		hw->aq.arq.len = I40E_VF_ARQLEN1;
 		hw->aq.arq.bal = I40E_VF_ARQBAL1;
 		hw->aq.arq.bah = I40E_VF_ARQBAH1;
+#ifdef PF_DRIVER
 	} else {
 		hw->aq.asq.tail = I40E_PF_ATQT;
 		hw->aq.asq.head = I40E_PF_ATQH;
@@ -80,6 +69,7 @@ STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
 		hw->aq.arq.len = I40E_PF_ARQLEN;
 		hw->aq.arq.bal = I40E_PF_ARQBAL;
 		hw->aq.arq.bah = I40E_PF_ARQBAH;
+#endif
 	}
 }
 
@@ -316,8 +306,26 @@ STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
 	wr32(hw, hw->aq.asq.tail, 0);
 
 	/* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+	if (!i40e_is_vf(hw))
+		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+					  I40E_PF_ATQLEN_ATQENABLE_MASK));
+#else
 	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
 				  I40E_PF_ATQLEN_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+	if (i40e_is_vf(hw))
+		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#else
+	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
 	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
 	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
 
@@ -345,8 +353,26 @@ STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
 	wr32(hw, hw->aq.arq.tail, 0);
 
 	/* set starting point */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+	if (!i40e_is_vf(hw))
+		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+					  I40E_PF_ARQLEN_ARQENABLE_MASK));
+#else
 	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
 				  I40E_PF_ARQLEN_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+	if (i40e_is_vf(hw))
+		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#else
+	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
 	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
 	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
 
@@ -393,7 +419,6 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
 
 	hw->aq.asq.next_to_use = 0;
 	hw->aq.asq.next_to_clean = 0;
-	hw->aq.asq.count = hw->aq.num_asq_entries;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -411,6 +436,7 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
 		goto init_adminq_free_rings;
 
 	/* success! */
+	hw->aq.asq.count = hw->aq.num_asq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -452,7 +478,6 @@ enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
 
 	hw->aq.arq.next_to_use = 0;
 	hw->aq.arq.next_to_clean = 0;
-	hw->aq.arq.count = hw->aq.num_arq_entries;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -470,6 +495,7 @@ enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
 		goto init_adminq_free_rings;
 
 	/* success! */
+	hw->aq.arq.count = hw->aq.num_arq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -546,6 +572,26 @@ shutdown_arq_out:
 	i40e_release_spinlock(&hw->aq.arq_spinlock);
 	return ret_code;
 }
+#ifdef PF_DRIVER
+
+/**
+ * i40e_resume_aq - resume AQ processing from 0
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_resume_aq(struct i40e_hw *hw)
+{
+	/* Registers are reset after PF reset */
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+
+	i40e_config_asq_regs(hw);
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+
+	i40e_config_arq_regs(hw);
+}
+#endif /* PF_DRIVER */
 
 /**
  * i40e_init_adminq - main initialization routine for Admin Queue
@@ -560,11 +606,15 @@ shutdown_arq_out:
  **/
 enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
 {
-	enum i40e_status_code ret_code;
 #ifdef PF_DRIVER
+	u16 cfg_ptr, oem_hi, oem_lo;
 	u16 eetrack_lo, eetrack_hi;
+#endif
+	enum i40e_status_code ret_code;
+#ifdef PF_DRIVER
 	int retry = 0;
 #endif
+
 	/* verify input for valid configuration */
 	if ((hw->aq.num_arq_entries == 0) ||
 	    (hw->aq.num_asq_entries == 0) ||
@@ -573,8 +623,6 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
 		ret_code = I40E_ERR_CONFIG;
 		goto init_adminq_exit;
 	}
-
-	/* initialize spin locks */
 	i40e_init_spinlock(&hw->aq.asq_spinlock);
 	i40e_init_spinlock(&hw->aq.arq_spinlock);
 
@@ -627,6 +675,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
 	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+			   &oem_hi);
+	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+			   &oem_lo);
+	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
 	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
 		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@ -635,13 +689,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
 
 	/* pre-emptive resource lock release */
 	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
-	hw->aq.nvm_release_on_done = false;
+	hw->nvm_release_on_done = false;
 	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
-	ret_code = i40e_aq_set_hmc_resource_profile(hw,
-						    I40E_HMC_PROFILE_DEFAULT,
-						    0,
-						    NULL);
 #endif /* PF_DRIVER */
 
 	ret_code = I40E_SUCCESS;
@@ -675,11 +725,12 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
 
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
-
-	/* destroy the spinlocks */
 	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
 	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
 
+	if (hw->nvm_buff.va)
+		i40e_free_virt_mem(hw, &hw->nvm_buff);
+
 	return ret_code;
 }
 
@@ -701,14 +752,13 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	while (rd32(hw, hw->aq.asq.head) != ntc) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
-			   rd32(hw, hw->aq.asq.head));
+			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
 				(I40E_ADMINQ_CALLBACK)details->callback;
-			i40e_memcpy(&desc_cb, desc,
-				    sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
+			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+				    I40E_DMA_TO_DMA);
 			cb_func(hw, &desc_cb);
 		}
 		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
@@ -732,7 +782,11 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
  * Returns true if the firmware has processed all descriptors on the
  * admin send queue. Returns false if there are still requests pending.
  **/
+#ifdef VF_DRIVER
 bool i40e_asq_done(struct i40e_hw *hw)
+#else
+STATIC bool i40e_asq_done(struct i40e_hw *hw)
+#endif
 {
 	/* AQ designers suggest use of head for better
 	 * timing reliability than DD bit
@@ -890,7 +944,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 		 */
 			if (i40e_asq_done(hw))
 				break;
-			/* ugh! delay while spin_lock */
 			i40e_msec_delay(1);
 			total_delay++;
 		} while (total_delay < hw->aq.asq_cmd_timeout);
@@ -984,11 +1037,36 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
 	u16 flags;
 	u16 ntu;
 
+	/* pre-clean the event info */
+	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
+
 	/* take the lock before we start messing with the ring */
 	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 
+	if (hw->aq.arq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Admin queue not initialized.\n");
+		ret_code = I40E_ERR_QUEUE_EMPTY;
+		goto clean_arq_element_err;
+	}
+
 	/* set next_to_use to head */
+#ifdef PF_DRIVER
+#ifdef INTEGRATED_VF
+	if (!i40e_is_vf(hw))
+		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+#else
 	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* PF_DRIVER */
+#ifdef VF_DRIVER
+#ifdef INTEGRATED_VF
+	if (i40e_is_vf(hw))
+		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+#else
+	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+#endif /* INTEGRATED_VF */
+#endif /* VF_DRIVER */
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1046,37 +1124,16 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
 	hw->aq.arq.next_to_clean = ntc;
 	hw->aq.arq.next_to_use = ntu;
 
+#ifdef PF_DRIVER
+	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
+#endif /* PF_DRIVER */
 clean_arq_element_out:
 	/* Set pending if needed, unlock and return */
 	if (pending != NULL)
 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
 	i40e_release_spinlock(&hw->aq.arq_spinlock);
 
-#ifdef PF_DRIVER
-	if (i40e_is_nvm_update_op(&e->desc)) {
-		if (hw->aq.nvm_release_on_done) {
-			i40e_release_nvm(hw);
-			hw->aq.nvm_release_on_done = false;
-		}
-	}
-
-#endif
 	return ret_code;
 }
 
-void i40e_resume_aq(struct i40e_hw *hw)
-{
-	/* Registers are reset after PF reset */
-	hw->aq.asq.next_to_use = 0;
-	hw->aq.asq.next_to_clean = 0;
-
-#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
-#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
-#endif
-	i40e_config_asq_regs(hw);
-
-	hw->aq.arq.next_to_use = 0;
-	hw->aq.arq.next_to_clean = 0;
-
-	i40e_config_arq_regs(hw);
-}