X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fbase%2Fi40e_nvm.c;h=a1e78300dea86037e32c1873319922348fba3c85;hb=1ff8e79591a6cc99c475a50e87adb30d9b20569a;hp=04e422faf3b55d006ce4721a3c0ab3efc2eae292;hpb=c906def2694236cdea69188974a552bd0124584c;p=dpdk.git

diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index 04e422faf3..a1e78300de 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -219,19 +219,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
-#ifdef X722_SUPPORT
-	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
-		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-		if (!ret_code) {
+	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (!ret_code) {
+		if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
 			ret_code = i40e_read_nvm_word_aq(hw, offset, data);
-			i40e_release_nvm(hw);
+		} else {
+			ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
 		}
-	} else {
-		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+		i40e_release_nvm(hw);
 	}
-#else
-	ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
 	return ret_code;
 }
 
@@ -249,14 +245,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
-#ifdef X722_SUPPORT
 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
 		ret_code = i40e_read_nvm_word_aq(hw, offset, data);
 	else
 		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#else
-	ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
 	return ret_code;
 }
 
@@ -348,14 +340,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
-#ifdef X722_SUPPORT
 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
 		ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
 	else
 		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#else
-	ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
 	return ret_code;
 }
 
@@ -375,7 +363,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
-#ifdef X722_SUPPORT
 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 		if (!ret_code) {
@@ -386,9 +373,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 	} else {
 		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 	}
-#else
-	ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
 
 	return ret_code;
 }
@@ -765,12 +749,18 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
 
 	DEBUGFUNC("i40e_validate_nvm_checksum");
 
-	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
-		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	/* acquire_nvm provides exclusive NVM lock to synchronize access across
+	 * PFs. X710 uses i40e_read_nvm_word_srctl which polls for done bit
+	 * twice (first time to be able to write address to I40E_GLNVM_SRCTL
+	 * register, second to read data from I40E_GLNVM_SRDATA. One PF can see
+	 * done bit and try to write address, while another one will interpret
+	 * it as a good time to read data. It will cause invalid data to be
+	 * read.
+	 */
+	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 	if (!ret_code) {
 		ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
-		if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
-			i40e_release_nvm(hw);
+		i40e_release_nvm(hw);
 		if (ret_code != I40E_SUCCESS)
 			goto i40e_validate_nvm_checksum_exit;
 	} else {
@@ -872,10 +862,10 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
 	/* early check for status command and debug msgs */
 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
-	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
 		   i40e_nvm_update_state_str[upd_cmd],
 		   hw->nvmupd_state,
-		   hw->nvm_release_on_done,
+		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
 		   cmd->command, cmd->config, cmd->offset, cmd->data_size);
 
 	if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -889,10 +879,37 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
 	 * going into the state machine
 	 */
 	if (upd_cmd == I40E_NVMUPD_STATUS) {
+		if (!cmd->data_size) {
+			*perrno = -EFAULT;
+			return I40E_ERR_BUF_TOO_SHORT;
+		}
+
 		bytes[0] = hw->nvmupd_state;
+
+		if (cmd->data_size >= 4) {
+			bytes[1] = 0;
+			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+		}
+
+		/* Clear error status on read */
+		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
 		return I40E_SUCCESS;
 	}
 
+	/* Clear status even it is not read and log */
+	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+	}
+
+	/* Acquire lock to prevent race condition where adminq_task
+	 * can execute after i40e_nvmupd_nvm_read/write but before state
+	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated
+	 */
+	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 	switch (hw->nvmupd_state) {
 	case I40E_NVMUPD_STATE_INIT:
 		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -908,6 +925,14 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
 
 	case I40E_NVMUPD_STATE_INIT_WAIT:
 	case I40E_NVMUPD_STATE_WRITE_WAIT:
+		/* if we need to stop waiting for an event, clear
+		 * the wait info and return before doing anything else
+		 */
+		if (cmd->offset == 0xffff) {
+			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+			return I40E_SUCCESS;
+		}
+
 		status = I40E_ERR_NOT_READY;
 		*perrno = -EBUSY;
 		break;
@@ -920,6 +945,7 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
 		*perrno = -ESRCH;
 		break;
 	}
+	i40e_release_spinlock(&hw->aq.arq_spinlock);
 	return status;
 }
 
@@ -981,6 +1007,7 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
 				i40e_release_nvm(hw);
 			} else {
 				hw->nvm_release_on_done = true;
+				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 			}
 		}
@@ -997,6 +1024,7 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
 				i40e_release_nvm(hw);
 			} else {
 				hw->nvm_release_on_done = true;
+				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 			}
 		}
@@ -1009,10 +1037,12 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
 				   hw->aq.asq_last_status);
 		} else {
 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-			if (status)
+			if (status) {
 				i40e_release_nvm(hw);
-			else
+			} else {
+				hw->nvm_wait_opcode =
i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } } break; @@ -1031,6 +1061,7 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, i40e_release_nvm(hw); } else { hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -1125,8 +1156,10 @@ retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); - if (!status) + if (!status) { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } break; case I40E_NVMUPD_WRITE_LCB: @@ -1139,6 +1172,7 @@ retry: hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -1153,6 +1187,7 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; @@ -1168,6 +1203,7 @@ retry: hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -1216,6 +1252,43 @@ retry: return status; } +/** + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + **/ +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) +{ + if (opcode == hw->nvm_wait_opcode) { + + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; + } + hw->nvm_wait_opcode = 0; + + if (hw->aq.arq_last_status) { + hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; + return; + } + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } + } +} + /** * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure @@ -1363,7 +1436,8 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, if (hw->nvm_buff.va) { buff = hw->nvm_buff.va; - memcpy(buff, &bytes[aq_desc_len], aq_data_len); + i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len, + I40E_NONDMA_TO_NONDMA); } } @@ -1378,6 +1452,12 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } + /* should we wait for a followup event? */ + if (cmd->offset) { + hw->nvm_wait_opcode = cmd->offset; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + return status; } @@ -1430,7 +1510,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, __func__, cmd->offset, cmd->offset + len); buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; - memcpy(bytes, buff, len); + i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA); bytes += len; remainder -= len; @@ -1444,7 +1524,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", __func__, start_byte, start_byte + remainder); - memcpy(bytes, buff, remainder); + i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA); } return I40E_SUCCESS;
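
Editor's note: the sketch below is not part of the patch. It is a minimal, self-contained model of the wait-opcode handshake that this patch builds around i40e_nvmupd_check_wait_event(): a write or erase path records the admin queue opcode it expects in nvm_wait_opcode and parks the state machine in an *_WAIT state; when the code that drains the admin receive queue later sees that opcode complete, it releases the NVM semaphore if nvm_release_on_done was set, clears the wait, and either advances the state machine or latches the error state for a later STATUS command to report. The struct, enum, and helpers here are simplified stand-ins for struct i40e_hw and i40e_release_nvm(), not the driver's definitions, and the 0x0703 opcode value is quoted from the admin queue command set purely for illustration.

/* Standalone model of the wait-opcode handshake added by this patch.
 * Field names mirror the driver (nvmupd_state, nvm_wait_opcode,
 * nvm_release_on_done), but everything below is a simplified stub,
 * not the real i40e data structures.
 */
#include <stdbool.h>
#include <stdio.h>

enum nvmupd_state {
	NVMUPD_STATE_INIT,
	NVMUPD_STATE_WRITING,
	NVMUPD_STATE_INIT_WAIT,
	NVMUPD_STATE_WRITE_WAIT,
	NVMUPD_STATE_ERROR,
};

#define AQC_OPC_NVM_UPDATE 0x0703	/* nvm update opcode, for illustration */

struct fake_hw {
	enum nvmupd_state nvmupd_state;
	unsigned short nvm_wait_opcode;	/* opcode we are waiting to complete */
	bool nvm_release_on_done;	/* drop the NVM semaphore on completion */
	int arq_last_status;		/* nonzero: the AQ event reported an error */
};

static void fake_release_nvm(struct fake_hw *hw)
{
	(void)hw;
	printf("NVM semaphore released\n");
}

/* Mirrors the logic of i40e_nvmupd_check_wait_event(): called when a
 * completion carrying 'opcode' arrives on the admin receive queue.
 */
static void check_wait_event(struct fake_hw *hw, unsigned short opcode)
{
	if (opcode != hw->nvm_wait_opcode)
		return;

	if (hw->nvm_release_on_done) {
		fake_release_nvm(hw);
		hw->nvm_release_on_done = false;
	}
	hw->nvm_wait_opcode = 0;

	if (hw->arq_last_status) {
		/* leave the error latched until a STATUS command reads it */
		hw->nvmupd_state = NVMUPD_STATE_ERROR;
		return;
	}

	switch (hw->nvmupd_state) {
	case NVMUPD_STATE_INIT_WAIT:
		hw->nvmupd_state = NVMUPD_STATE_INIT;
		break;
	case NVMUPD_STATE_WRITE_WAIT:
		hw->nvmupd_state = NVMUPD_STATE_WRITING;
		break;
	default:
		break;
	}
}

int main(void)
{
	/* state a WRITE_LCB leaves behind: waiting for the update
	 * completion, semaphore held until it arrives
	 */
	struct fake_hw hw = {
		.nvmupd_state = NVMUPD_STATE_INIT_WAIT,
		.nvm_wait_opcode = AQC_OPC_NVM_UPDATE,
		.nvm_release_on_done = true,
		.arq_last_status = 0,
	};

	check_wait_event(&hw, AQC_OPC_NVM_UPDATE);
	printf("state after completion: %d (expect %d = INIT)\n",
	       hw.nvmupd_state, NVMUPD_STATE_INIT);
	return 0;
}

In the driver itself the equivalent call is made from the admin-queue event processing path, so the transition happens asynchronously with respect to the caller of i40e_nvmupd_command(); the arq_spinlock taken in this patch exists precisely because those two paths can race, as the added comment about adminq_task explains.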