/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_spinlock.h>
#include <rte_bus_pci.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET	2
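
/*
 * Layout of a VF-to-PF mailbox request (as used by hns3_send_mbx_msg()
 * below): msg[0] carries the request code and msg[1] the subcode, so an
 * ordinary payload starts at HNS3_CMD_CODE_OFFSET and is limited to
 * HNS3_MBX_MAX_MSG_SIZE - 2 bytes. Ring-vector messages are the exception:
 * they carry no subcode and their payload starts at offset 1.
 */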

static const struct errno_respcode_map err_code_map[] = {
	/*
	 * {Linux kernel errno reported by the PF, DPDK errno}. The entries
	 * below are a representative reconstruction (assumed); the original
	 * table may contain additional codes.
	 */
	{0, 0},
	{1, -EPERM},
	{2, -ENOENT},
	{5, -EIO},
	{11, -EAGAIN},
	{12, -ENOMEM},
	{16, -EBUSY},
	{22, -EINVAL},
	{95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
	uint32_t i, num;

	num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
	for (i = 0; i < num; i++) {
		if (err_code_map[i].resp_code == resp_code)
			return err_code_map[i].err_no;
	}

	return -EIO;
}
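
/*
 * hns3_resp_to_errno() is consumed by hns3_dev_handle_mbx_msg() below:
 * when a HNS3_MBX_PF_VF_RESP descriptor arrives, req->msg[3] carries the
 * PF's response code and is translated here before being stored in
 * hw->mbx_resp.resp_status.
 */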

static void
hns3_poll_all_sync_msg(void)
{
	struct rte_eth_dev *eth_dev;
	struct hns3_adapter *adapter;
	const char *name;
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		eth_dev = &rte_eth_devices[port_id];
		name = eth_dev->device->driver->name;
		if (strcmp(name, "net_hns3") && strcmp(name, "net_hns3_vf"))
			continue;
		adapter = eth_dev->data->dev_private;
		if (!adapter || adapter->hw.adapter_state == HNS3_NIC_CLOSED)
			continue;
		/* Synchronous msg, the mbx_resp.req_msg_data is non-zero */
		if (adapter->hw.mbx_resp.req_msg_data)
			hns3_dev_handle_mbx_msg(&adapter->hw);
	}
}
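
/*
 * hns3_get_mbx_resp() below busy-waits for the PF's answer to a
 * synchronous request. When it runs on the interrupt thread it cannot
 * sleep and wait for hns3_dev_handle_mbx_msg() to be scheduled, so it
 * calls hns3_poll_all_sync_msg() above to drain pending responses itself.
 */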

static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
		  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS	500
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mbx_resp_status *mbx_resp;
	bool in_irq = false;
	uint64_t now;
	uint64_t end;

	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
		hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)",
			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	now = get_timeofday_ms();
	end = now + HNS3_MAX_RETRY_MS;
	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
	       (now < end)) {
		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw, "Don't wait for mbx response because of "
				 "disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hw->mbx_resp.req_msg_data = 0;
			hns3_err(hw, "Don't wait for mbx response because of "
				 "reset pending");
			return -EIO;
		}

		/*
		 * Mailbox responses are processed on the interrupt thread.
		 * A mailbox sent from the interrupt thread cannot sleep and
		 * wait for its response, so poll for the response here on
		 * the irq thread.
		 */
		if (pthread_equal(hw->irq_thread_id, pthread_self())) {
			in_irq = true;
			hns3_poll_all_sync_msg();
		} else {
			rte_delay_ms(HNS3_POLL_RESPONE_MS);
		}
		now = get_timeofday_ms();
	}
	hw->mbx_resp.req_msg_data = 0;
	if (now >= end) {
		hw->mbx_resp.lost++;
		hns3_err(hw,
			 "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d",
			 code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
			 hw->mbx_resp.lost, in_irq);
		return -ETIME;
	}
	rte_io_rmb();
	mbx_resp = &hw->mbx_resp;

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	return 0;
}
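
/*
 * A sketch of the counters used by the wait loop above, as reconstructed
 * from the code: head counts synchronous requests sent, tail counts
 * responses consumed, and lost counts requests that timed out. A response
 * is outstanding while head != tail + lost; e.g. with head = 3, tail = 2
 * and lost = 1 there is nothing left to wait for.
 */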

int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		  uint8_t *resp_data, uint16_t resp_len)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	bool is_ring_vector_msg;
	int offset;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
		hns3_err(hw,
			 "VF send mbx msg fail, msg len %d exceeds max payload len %d",
			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = code;
	is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_GET_RING_VECTOR_MAP);
	if (!is_ring_vector_msg)
		req->msg[1] = subcode;
	if (msg_data) {
		offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
		memcpy(&req->msg[offset], msg_data, msg_len);
	}

	/* synchronous send */
	if (need_resp) {
		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
		rte_spinlock_lock(&hw->mbx_resp.lock);
		hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
		hw->mbx_resp.head++;
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			rte_spinlock_unlock(&hw->mbx_resp.lock);
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}

		ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
		rte_spinlock_unlock(&hw->mbx_resp.lock);
	} else {
		/* asynchronous send */
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}
	}

	return ret;
}
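
/*
 * Illustrative usage sketch (not part of the original driver): request the
 * link status from the PF as an asynchronous message. HNS3_MBX_GET_LINK_STATUS
 * is an existing mailbox code, but treat the exact calling convention and
 * the assumption that the PF answers later with a HNS3_MBX_LINK_STAT_CHANGE
 * message (handled in hns3_mbx_handler() below) as unverified. The
 * __rte_unused attribute only keeps this example from tripping
 * unused-function warnings.
 */
static __rte_unused int
hns3_mbx_example_request_link_status(struct hns3_hw *hw)
{
	/* No payload and need_resp=false: the call returns as soon as the
	 * request is posted, without waiting in hns3_get_mbx_resp().
	 */
	return hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0,
				 false, NULL, 0);
}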

static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
	uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}
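
/*
 * The CRQ (command receive queue) is empty once the hardware tail pointer
 * has caught up with the driver's next_to_use index;
 * hns3_dev_handle_mbx_msg() below consumes descriptors until this holds.
 */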

static void
hns3_mbx_handler(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	enum hns3_reset_level reset_level;
	uint16_t *msg_q;
	uint8_t opcode;
	uint32_t tail;

	tail = hw->arq.tail;

	/* process all the async queue messages */
	while (tail != hw->arq.head) {
		msg_q = hw->arq.msg_q[hw->arq.head];

		opcode = msg_q[0] & 0xff;
		switch (opcode) {
		case HNS3_MBX_LINK_STAT_CHANGE:
			memcpy(&mac->link_speed, &msg_q[2],
			       sizeof(mac->link_speed));
			mac->link_status = rte_le_to_cpu_16(msg_q[1]);
			mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
			break;
		case HNS3_MBX_ASSERTING_RESET:
			/* PF has asserted reset, hence VF should go into
			 * pending state and poll the hardware reset status
			 * till it has completely reset. After that, the
			 * stack should eventually be re-initialized.
			 */
			reset_level = rte_le_to_cpu_16(msg_q[1]);
			hns3_atomic_set_bit(reset_level, &hw->reset.pending);

			hns3_warn(hw, "PF inform reset level %d", reset_level);
			hw->reset.stats.request_cnt++;
			hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
			break;
		default:
			hns3_err(hw, "Fetched unsupported(%d) message from arq",
				 opcode);
			break;
		}

		hns3_mbx_head_ptr_move_arq(hw->arq);
		msg_q = hw->arq.msg_q[hw->arq.head];
	}
}

/*
 * Case1: a response is received after the timeout: req_msg_data is
 *	  already 0 and does not match resp_msg, so decrement lost.
 * Case2: the previous response arrives while a new send_mbx_msg() is in
 *	  flight: req_msg_data differs from resp_msg, so decrement lost and
 *	  keep waiting for the new response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t tail = resp->tail + 1;

	if (tail > resp->head)
		tail = resp->head;
	if (resp->req_msg_data != resp_msg) {
		if (resp->lost)
			resp->lost--;
		hns3_warn(hw, "Received a mismatched response req_msg(%x) "
			  "resp_msg(%x) head(%d) tail(%d) lost(%d)",
			  resp->req_msg_data, resp_msg, resp->head, tail,
			  resp->lost);
	} else if (tail + resp->lost > resp->head) {
		resp->lost--;
		hns3_warn(hw, "Received a new response again resp_msg(%x) "
			  "head(%d) tail(%d) lost(%d)", resp_msg,
			  resp->head, tail, resp->lost);
	}
	rte_io_wmb();
	resp->tail = tail;
}
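
/*
 * Worked example of the recovery above (assumed scenario): a request times
 * out, leaving head = 2, tail = 1, lost = 1 and req_msg_data = 0. When the
 * stale response finally arrives, req_msg_data != resp_msg, so lost drops
 * back to 0 and tail advances to head; the mailbox state is consistent
 * again.
 */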

static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
	switch (link_fail_code) {
	case HNS3_MBX_LF_NORMAL:
		break;
	case HNS3_MBX_LF_REF_CLOCK_LOST:
		hns3_warn(hw, "Reference clock lost!");
		break;
	case HNS3_MBX_LF_XSFP_TX_DISABLE:
		hns3_warn(hw, "SFP tx is disabled!");
		break;
	case HNS3_MBX_LF_XSFP_ABSENT:
		hns3_warn(hw, "SFP is absent!");
		break;
	default:
		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
		break;
	}
}

static void
hns3_handle_link_change_event(struct hns3_hw *hw,
			      struct hns3_mbx_pf_to_vf_cmd *req)
{
#define LINK_STATUS_OFFSET	1
#define LINK_FAIL_CODE_OFFSET	2

	if (!req->msg[LINK_STATUS_OFFSET])
		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

	hns3_update_link_status(hw);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
				struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET	1
	uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
	/*
	 * Currently, the hardware based on the hns3 network engine doesn't
	 * support more than two layers of VLAN offload; exceeding that
	 * causes packet loss or corrupted packets. If the hns3 PF kernel
	 * ethdev driver sets the PVID of a VF device after that VF has been
	 * initialized, the PF driver notifies the VF driver to update its
	 * PVID configuration state. The VF driver updates the state
	 * immediately so that VLAN processing in Tx and Rx stays correct,
	 * but during the window of this state transition, packet loss or
	 * packets with the wrong VLAN may still occur.
	 */
	if (hw->port_base_vlan_cfg.state != new_pvid_state) {
		hw->port_base_vlan_cfg.state = new_pvid_state;
		hns3_update_all_queues_pvid_proc_en(hw);
	}
}
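
/*
 * hns3_handle_promisc_info() below reacts to a PF "trust" change: when the
 * host marks the VF as untrusted, the PF forcibly disables promiscuous and
 * all-multicast modes, and the VF mirrors that in its dev_data flags.
 */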

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
	if (!promisc_en) {
		/*
		 * When promisc/allmulti mode is closed by the hns3 PF kernel
		 * ethdev driver because the VF is untrusted, update the VF's
		 * related status.
		 */
		hns3_warn(hw, "Promisc mode will be closed by host for being "
			      "untrusted.");
		hw->data->promiscuous = 0;
		hw->data->all_multicast = 0;
	}
}

void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	uint32_t msg_data;
	uint16_t *msg_q;
	uint8_t opcode;
	uint16_t flag;
	uint8_t *temp;
	int i;

	while (!hns3_cmd_crq_empty(hw)) {
		if (rte_atomic16_read(&hw->reset.disable_cmd))
			return;

		desc = &crq->desc[crq->next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
		opcode = req->msg[0] & 0xff;

		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
			hns3_warn(hw,
				  "dropped invalid mailbox message, code = %d",
				  opcode);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		switch (opcode) {
		case HNS3_MBX_PF_VF_RESP:
			resp->resp_status = hns3_resp_to_errno(req->msg[3]);

			temp = (uint8_t *)&req->msg[4];
			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
				resp->additional_info[i] = *temp;
				temp++;
			}
			msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
			hns3_update_resp_position(hw, msg_data);
			break;
		case HNS3_MBX_LINK_STAT_CHANGE:
		case HNS3_MBX_ASSERTING_RESET:
			msg_q = hw->arq.msg_q[hw->arq.tail];
			memcpy(&msg_q[0], req->msg,
			       HNS3_MBX_MAX_ARQ_MSG_SIZE * sizeof(uint16_t));
			hns3_mbx_tail_ptr_move_arq(hw->arq);

			hns3_mbx_handler(hw);
			break;
		case HNS3_MBX_PUSH_LINK_STATUS:
			hns3_handle_link_change_event(hw, req);
			break;
		case HNS3_MBX_PUSH_VLAN_INFO:
			/*
			 * When the PVID configuration state of the VF device
			 * is changed by the hns3 PF kernel driver, the VF
			 * driver receives this mailbox message from the PF
			 * driver.
			 */
			hns3_update_port_base_vlan_info(hw, req);
			break;
		case HNS3_MBX_PUSH_PROMISC_INFO:
			/*
			 * When the trust status of the VF device is changed
			 * by the hns3 PF kernel driver, the VF driver
			 * receives this mailbox message from the PF driver.
			 */
			hns3_handle_promisc_info(hw, req->msg[1]);
			break;
		default:
			hns3_err(hw,
				 "VF received unsupported(%d) mbx msg from PF",
				 req->msg[0]);
			break;
		}

		crq->desc[crq->next_to_use].flag = 0;
		hns3_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back the CMDQ_RQ header pointer; the IMP needs it */
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
}