hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1,
+ __ATOMIC_RELAXED);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
if (hns3_cmd_csq_done(hw))
return 0;
- if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
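
For reference, this is the one transformation the series applies everywhere: the deprecated rte_atomic16_t wrappers (whose generic set/read implementations are plain volatile accesses, with no atomicity or ordering guarantees) become GCC __atomic builtins on a plain uint16_t, using relaxed ordering since the flag carries no data dependencies of its own. A minimal compilable sketch with illustrative names, not the driver's:

    /* Illustrative sketch of the before/after pattern (not driver code).
     * Build check: gcc -c sketch.c
     */
    #include <stdint.h>

    struct reset_state {                  /* stand-in for hns3_reset_data */
        uint16_t disable_cmd;             /* was rte_atomic16_t */
    };

    static void disable_commands(struct reset_state *rs)
    {
        /* was: rte_atomic16_set(&rs->disable_cmd, 1); */
        __atomic_store_n(&rs->disable_cmd, 1, __ATOMIC_RELAXED);
    }

    static int commands_disabled(const struct reset_state *rs)
    {
        /* was: rte_atomic16_read(&rs->disable_cmd) */
        return __atomic_load_n(&rs->disable_cmd, __ATOMIC_RELAXED) != 0;
    }
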
int retval;
uint32_t ntc;
- if (rte_atomic16_read(&hw->reset.disable_cmd))
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
ret = -EBUSY;
goto err_cmd_init;
}
- rte_atomic16_clear(&hw->reset.disable_cmd);
+ __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
return 0;
err_cmd_init:
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
return ret;
}
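
Note that rte_atomic16_clear() (replaced in the init path above) and rte_atomic16_init() (replaced in a later hunk) both collapse to the same relaxed store of zero; once the field is a plain integer there is no separate "init" operation. Sketch, names illustrative:

    #include <stdint.h>

    /* Both rte_atomic16_init(&f) and rte_atomic16_clear(&f) just zero
     * the counter, so either maps to a single relaxed store:
     */
    static void clear_flag(uint16_t *flag)
    {
        __atomic_store_n(flag, 0, __ATOMIC_RELAXED);
    }
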
{
rte_spinlock_lock(&hw->cmq.csq.lock);
rte_spinlock_lock(&hw->cmq.crq.lock);
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hns3_cmd_clear_regs(hw);
rte_spinlock_unlock(&hw->cmq.crq.lock);
rte_spinlock_unlock(&hw->cmq.csq.lock);
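
In this uninit path the store lands while both command-queue spinlocks are held, so lock acquire/release already orders the flag update against the register clears that follow; the store itself only needs to be atomic, not a barrier. The shape of the pattern, with a pthread mutex standing in for rte_spinlock and illustrative names:

    #include <stdint.h>
    #include <pthread.h>

    static pthread_mutex_t cmdq_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint16_t disable_cmd;

    static void cmd_uninit_sketch(void)
    {
        pthread_mutex_lock(&cmdq_lock);
        /* The critical section orders this store against the register
         * writes done under the same lock, so relaxed is sufficient. */
        __atomic_store_n(&disable_cmd, 1, __ATOMIC_RELAXED);
        /* ... clear command-queue registers here ... */
        pthread_mutex_unlock(&cmdq_lock);
    }
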
{
struct hns3_hw *hw = &hns->hw;
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
if (!is_delay) {
{
struct hns3_hw *hw = &hns->hw;
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
if (!is_delay) {
return ret;
hw->mac.link_status = ETH_LINK_DOWN;
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_DEFERRED) {
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ __ATOMIC_RELAXED);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
/*
* Check if there is any ongoing reset in the hardware. This status can
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
} else {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- rte_atomic16_t schedule;
+ uint16_t schedule;
/* Reset flag, covering the entire reset process */
uint16_t resetting;
/* Used to disable sending cmds during reset */
- rte_atomic16_t disable_cmd;
+ uint16_t disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
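
Once these fields are plain uint16_t, every reader and writer must go through the __atomic builtins: a single bare access racing a concurrent store would be a data race with undefined behavior, which is why the series touches each access site rather than only the writers. Illustrative accessor mirroring the struct change above:

    #include <stdint.h>

    struct reset_data_sketch {       /* illustrative mirror of the diff */
        uint16_t schedule;           /* was rte_atomic16_t */
        uint16_t disable_cmd;        /* was rte_atomic16_t */
    };

    static uint16_t read_schedule(const struct reset_data_sketch *rd)
    {
        /* A bare rd->schedule read here would race concurrent stores. */
        return __atomic_load_n(&rd->schedule, __ATOMIC_RELAXED);
    }
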
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
hw->mac.link_status = ETH_LINK_DOWN;
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
hns3vf_configure_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
0, true, NULL, 0);
}
- rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
return ret;
}
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
hns3vf_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_DEFERRED) {
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ __ATOMIC_RELAXED);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
/*
* Hardware reset has been notified, we now have to poll & check if
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
} else {
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- rte_atomic16_init(&hw->reset.disable_cmd);
+ __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_PENDING);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
+ __ATOMIC_RELAXED);
return;
}
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_REQUESTED)
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_REQUESTED)
return;
- if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED)
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ __ATOMIC_RELAXED);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
return;
}
- if (rte_atomic16_read(&hns->hw.reset.schedule) != SCHEDULE_NONE)
+ if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ SCHEDULE_NONE)
return;
- rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_DEFERRED);
+ __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ __ATOMIC_RELAXED);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
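
Taken together, the two functions above drive a small state machine on the schedule field: NONE to DEFERRED (delayed alarm) to REQUESTED (service armed) and back to NONE when the service completes, with PENDING reserved for resets raised during dev_init. A compressed sketch of the REQUESTED transition; the SCHEDULE_* names come from the excerpt, everything else (including the enum's numeric order) is illustrative:

    #include <stdint.h>

    enum sched_state {               /* numeric order illustrative */
        SCHEDULE_NONE,
        SCHEDULE_DEFERRED,
        SCHEDULE_REQUESTED,
        SCHEDULE_PENDING,
    };

    static uint16_t schedule;        /* was rte_atomic16_t */

    static void schedule_reset_sketch(void)
    {
        uint16_t cur = __atomic_load_n(&schedule, __ATOMIC_RELAXED);

        if (cur == SCHEDULE_REQUESTED)
            return;                  /* service alarm already armed */
        if (cur == SCHEDULE_DEFERRED) {
            /* cancel the deferred alarm here */
        }
        __atomic_store_n(&schedule, SCHEDULE_REQUESTED, __ATOMIC_RELAXED);
        /* arm the reset-service alarm here */
    }

Note the load/store pair is not a single atomic read-modify-write; the driver appears to rely on these transitions running from one control context (the interrupt handler plus EAL alarm callbacks), which is why per-access relaxed atomicity is enough here.
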
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (rte_atomic16_read(&hw->reset.disable_cmd))
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
end = now + HNS3_MAX_RETRY_MS;
while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
(now < end)) {
- if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
hns3_err(hw, "Don't wait for mbx respone because of "
"disable_cmd");
return -EBUSY;
int i;
while (!hns3_cmd_crq_empty(hw)) {
- if (rte_atomic16_read(&hw->reset.disable_cmd))
+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
return;
desc = &crq->desc[crq->next_to_use];
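
Finally, the flag doubles as a cooperative cancellation signal in polling loops like the CRQ handler above: it is re-read on every iteration so a reset starting mid-loop stops mailbox processing at the next descriptor boundary. Minimal sketch with stubbed helpers:

    #include <stdint.h>
    #include <stdbool.h>

    static uint16_t disable_cmd;

    static bool crq_empty_stub(void) { return true; }   /* stub */

    static void process_crq_sketch(void)
    {
        while (!crq_empty_stub()) {
            /* Re-check each iteration: a concurrent reset setting the
             * flag cancels processing between descriptors. */
            if (__atomic_load_n(&disable_cmd, __ATOMIC_RELAXED))
                return;
            /* ... handle one descriptor ... */
        }
    }
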