diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index c03e71dfda..61d15845e7 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -2,29 +2,15 @@
  * Copyright(c) 2018-2019 Hisilicon Limited.
  */
 
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+#include
 #include
-#include
-#include
-#include
 
 #include "hns3_ethdev.h"
 #include "hns3_regs.h"
 #include "hns3_logs.h"
 #include "hns3_intr.h"
+#include "hns3_rxtx.h"
 
-#define HNS3_REG_MSG_DATA_OFFSET	4
 #define HNS3_CMD_CODE_OFFSET		2
 
 static const struct errno_respcode_map err_code_map[] = {
@@ -81,13 +67,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
 		  uint8_t *resp_data, uint16_t resp_len)
 {
 #define HNS3_MAX_RETRY_MS	500
+	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_mbx_resp_status *mbx_resp;
 	bool in_irq = false;
 	uint64_t now;
 	uint64_t end;
 
 	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
-		hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)",
+		hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
 			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
 		return -EINVAL;
 	}
@@ -96,6 +83,19 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
 	end = now + HNS3_MAX_RETRY_MS;
 	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
 	       (now < end)) {
+		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+			hns3_err(hw, "Don't wait for mbx respone because of "
+				 "disable_cmd");
+			return -EBUSY;
+		}
+
+		if (is_reset_pending(hns)) {
+			hw->mbx_resp.req_msg_data = 0;
+			hns3_err(hw, "Don't wait for mbx respone because of "
+				 "reset pending");
+			return -EIO;
+		}
+
 		/*
 		 * The mbox response is running on the interrupt thread.
 		 * Sending mbox in the interrupt thread cannot wait for the
@@ -113,7 +113,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
 	if (now >= end) {
 		hw->mbx_resp.lost++;
 		hns3_err(hw,
-			 "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d",
+			 "VF could not get mbx(%u,%u) head(%u) tail(%u) lost(%u) from PF in_irq:%d",
 			 code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
 			 hw->mbx_resp.lost, in_irq);
 		return -ETIME;
@@ -137,6 +137,8 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 {
 	struct hns3_mbx_vf_to_pf_cmd *req;
 	struct hns3_cmd_desc desc;
+	bool is_ring_vector_msg;
+	int offset;
 	int ret;
 
 	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
@@ -144,16 +146,22 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 	/* first two bytes are reserved for code & subcode */
 	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
 		hns3_err(hw,
-			 "VF send mbx msg fail, msg len %d exceeds max payload len %d",
+			 "VF send mbx msg fail, msg len %u exceeds max payload len %d",
 			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
 		return -EINVAL;
 	}
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
 	req->msg[0] = code;
-	req->msg[1] = subcode;
-	if (msg_data)
-		memcpy(&req->msg[HNS3_CMD_CODE_OFFSET], msg_data, msg_len);
+	is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
+			     (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
+			     (code == HNS3_MBX_GET_RING_VECTOR_MAP);
+	if (!is_ring_vector_msg)
+		req->msg[1] = subcode;
+	if (msg_data) {
+		offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
+		memcpy(&req->msg[offset], msg_data, msg_len);
+	}
 
 	/* synchronous send */
 	if (need_resp) {
@@ -195,9 +203,11 @@ hns3_cmd_crq_empty(struct hns3_hw *hw)
 static void
 hns3_mbx_handler(struct hns3_hw *hw)
 {
-	struct hns3_mac *mac = &hw->mac;
 	enum hns3_reset_level reset_level;
+	uint8_t link_status, link_duplex;
+	uint32_t link_speed;
 	uint16_t *msg_q;
+	uint8_t opcode;
 	uint32_t tail;
 
 	tail = hw->arq.tail;
@@ -206,12 +216,14 @@ hns3_mbx_handler(struct hns3_hw *hw)
 	while (tail != hw->arq.head) {
 		msg_q = hw->arq.msg_q[hw->arq.head];
 
-		switch (msg_q[0]) {
+		opcode = msg_q[0] & 0xff;
+		switch (opcode) {
 		case HNS3_MBX_LINK_STAT_CHANGE:
-			memcpy(&mac->link_speed, &msg_q[2],
-			       sizeof(mac->link_speed));
-			mac->link_status = rte_le_to_cpu_16(msg_q[1]);
-			mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
+			memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
+			link_status = rte_le_to_cpu_16(msg_q[1]);
+			link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
+			hns3vf_update_link_status(hw, link_status, link_speed,
+						  link_duplex);
 			break;
 		case HNS3_MBX_ASSERTING_RESET:
 			/* PF has asserted reset hence VF should go in pending
@@ -224,10 +236,11 @@ hns3_mbx_handler(struct hns3_hw *hw)
 
 			hns3_warn(hw, "PF inform reset level %d", reset_level);
 			hw->reset.stats.request_cnt++;
+			hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
 			break;
 		default:
-			hns3_err(hw, "Fetched unsupported(%d) message from arq",
-				 msg_q[0]);
+			hns3_err(hw, "Fetched unsupported(%u) message from arq",
+				 opcode);
 			break;
 		}
 
@@ -255,19 +268,92 @@ hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
 		if (resp->lost)
 			resp->lost--;
 		hns3_warn(hw, "Received a mismatched response req_msg(%x) "
-			  "resp_msg(%x) head(%d) tail(%d) lost(%d)",
+			  "resp_msg(%x) head(%u) tail(%u) lost(%u)",
 			  resp->req_msg_data, resp_msg, resp->head, tail,
 			  resp->lost);
 	} else if (tail + resp->lost > resp->head) {
 		resp->lost--;
 		hns3_warn(hw, "Received a new response again resp_msg(%x) "
-			  "head(%d) tail(%d) lost(%d)", resp_msg,
+			  "head(%u) tail(%u) lost(%u)", resp_msg,
 			  resp->head, tail, resp->lost);
 	}
 	rte_io_wmb();
 	resp->tail = tail;
 }
 
+static void
+hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
+{
+	switch (link_fail_code) {
+	case HNS3_MBX_LF_NORMAL:
+		break;
+	case HNS3_MBX_LF_REF_CLOCK_LOST:
+		hns3_warn(hw, "Reference clock lost!");
+		break;
+	case HNS3_MBX_LF_XSFP_TX_DISABLE:
+		hns3_warn(hw, "SFP tx is disabled!");
+		break;
+	case HNS3_MBX_LF_XSFP_ABSENT:
+		hns3_warn(hw, "SFP is absent!");
+		break;
+	default:
+		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
+		break;
+	}
+}
+
+static void
+hns3_handle_link_change_event(struct hns3_hw *hw,
+			      struct hns3_mbx_pf_to_vf_cmd *req)
+{
+#define LINK_STATUS_OFFSET	1
+#define LINK_FAIL_CODE_OFFSET	2
+
+	if (!req->msg[LINK_STATUS_OFFSET])
+		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);
+
+	hns3_update_link_status_and_event(hw);
+}
+
+static void
+hns3_update_port_base_vlan_info(struct hns3_hw *hw,
+				struct hns3_mbx_pf_to_vf_cmd *req)
+{
+#define PVID_STATE_OFFSET	1
+	uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
+		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
+	/*
+	 * Currently, hardware doesn't support more than two layers VLAN offload
+	 * based on hns3 network engine, which would cause packets loss or wrong
+	 * packets for these types of packets. If the hns3 PF kernel ethdev
+	 * driver sets the PVID for VF device after initialization of the
+	 * related VF device, the PF driver will notify VF driver to update the
+	 * PVID configuration state. The VF driver will update the PVID
+	 * configuration state immediately to ensure that the VLAN process in Tx
+	 * and Rx is correct. But in the window period of this state transition,
+	 * packets loss or packets with wrong VLAN may occur.
+	 */
+	if (hw->port_base_vlan_cfg.state != new_pvid_state) {
+		hw->port_base_vlan_cfg.state = new_pvid_state;
+		hns3_update_all_queues_pvid_proc_en(hw);
+	}
+}
+
+static void
+hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
+{
+	if (!promisc_en) {
+		/*
+		 * When promisc/allmulti mode is closed by the hns3 PF kernel
+		 * ethdev driver for untrusted, modify VF's related status.
+		 */
+		hns3_warn(hw, "Promisc mode will be closed by host for being "
+			  "untrusted.");
+		hw->data->promiscuous = 0;
+		hw->data->all_multicast = 0;
+	}
+}
+
 void
 hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 {
@@ -277,22 +363,24 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 	struct hns3_cmd_desc *desc;
 	uint32_t msg_data;
 	uint16_t *msg_q;
+	uint8_t opcode;
 	uint16_t flag;
 	uint8_t *temp;
 	int i;
 
 	while (!hns3_cmd_crq_empty(hw)) {
-		if (rte_atomic16_read(&hw->reset.disable_cmd))
+		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
 			return;
 
 		desc = &crq->desc[crq->next_to_use];
 		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
+		opcode = req->msg[0] & 0xff;
 
 		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
 		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
 			hns3_warn(hw,
-				  "dropped invalid mailbox message, code = %d",
-				  req->msg[0]);
+				  "dropped invalid mailbox message, code = %u",
+				  opcode);
 
 			/* dropping/not processing this invalid message */
 			crq->desc[crq->next_to_use].flag = 0;
@@ -300,13 +388,12 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 			continue;
 		}
 
-		switch (req->msg[0]) {
+		switch (opcode) {
 		case HNS3_MBX_PF_VF_RESP:
 			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
 
 			temp = (uint8_t *)&req->msg[4];
-			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE &&
-			     i < HNS3_REG_MSG_DATA_OFFSET; i++) {
+			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
 				resp->additional_info[i] = *temp;
 				temp++;
 			}
@@ -322,9 +409,28 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 
 			hns3_mbx_handler(hw);
 			break;
+		case HNS3_MBX_PUSH_LINK_STATUS:
+			hns3_handle_link_change_event(hw, req);
+			break;
+		case HNS3_MBX_PUSH_VLAN_INFO:
+			/*
+			 * When the PVID configuration status of VF device is
+			 * changed by the hns3 PF kernel driver, VF driver will
+			 * receive this mailbox message from PF driver.
+			 */
+			hns3_update_port_base_vlan_info(hw, req);
+			break;
+		case HNS3_MBX_PUSH_PROMISC_INFO:
+			/*
+			 * When the trust status of VF device changed by the
+			 * hns3 PF kernel driver, VF driver will receive this
+			 * mailbox message from PF driver.
+			 */
+			hns3_handle_promisc_info(hw, req->msg[1]);
+			break;
 		default:
 			hns3_err(hw,
-				 "VF received unsupported(%d) mbx msg from PF",
+				 "VF received unsupported(%u) mbx msg from PF",
 				 req->msg[0]);
 			break;
 		}