/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "hns3_common.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET	2

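/*
 * Layout of a VF-to-PF mailbox request (see the length check in
 * hns3_send_mbx_msg() below): msg[0] carries the request code, msg[1] the
 * subcode, and the payload starts at msg[HNS3_CMD_CODE_OFFSET]. Ring-vector
 * messages are the exception: their payload starts right after the code, at
 * msg[1].
 */
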
static const struct errno_respcode_map err_code_map[] = {
	{0, 0},
	{1, -EPERM},
	{2, -ENOENT},
	{5, -EIO},
	{11, -EAGAIN},
	{12, -ENOMEM},
	{16, -EBUSY},
	{22, -EINVAL},
	{28, -ENOSPC},
	{95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
	uint32_t i, num;

	num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
	for (i = 0; i < num; i++) {
		if (err_code_map[i].resp_code == resp_code)
			return err_code_map[i].err_no;
	}

	return -EIO;
}

static void
hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
	if (hw->mbx_resp.matching_scheme ==
	    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) {
		hns3_err(hw,
			 "VF could not get mbx(%u,%u) head(%u) tail(%u) "
			 "lost(%u) from PF",
			 code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail,
			 hw->mbx_resp.lost);
		return;
	}

	hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode);
}

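/*
 * Poll for a synchronous mailbox response. Two matching schemes exist: the
 * original scheme counts requests and responses with the head/tail/lost
 * counters, while newer firmware echoes the request's match_id back in the
 * response (see hns3_handle_mbx_response()). Which scheme applies is
 * detected at runtime from the first response that carries a match_id.
 */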
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_WAIT_RESP_US	100
#define US_PER_MS		1000
	uint32_t mbx_time_limit;
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mbx_resp_status *mbx_resp;
	uint32_t wait_time = 0;
	bool received;

	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
		hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
	while (wait_time < mbx_time_limit) {
		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			hns3_err(hw, "Don't wait for mbx response because of "
				 "disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hw->mbx_resp.req_msg_data = 0;
			hns3_err(hw, "Don't wait for mbx response because of "
				 "reset pending");
			return -EIO;
		}

		hns3_dev_handle_mbx_msg(hw);
		rte_delay_us(HNS3_WAIT_RESP_US);

		if (hw->mbx_resp.matching_scheme ==
		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL)
			received = (hw->mbx_resp.head ==
				    hw->mbx_resp.tail + hw->mbx_resp.lost);
		else
			received = hw->mbx_resp.received_match_resp;
		if (received)
			break;

		wait_time += HNS3_WAIT_RESP_US;
	}
	hw->mbx_resp.req_msg_data = 0;
	if (wait_time >= mbx_time_limit) {
		hns3_mbx_proc_timeout(hw, code, subcode);
		return -ETIME;
	}
	rte_io_rmb();
	mbx_resp = &hw->mbx_resp;

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	return 0;
}

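/*
 * Reset the response-matching state before a new synchronous request is
 * sent. Callers must hold hw->mbx_resp.lock (hns3_send_mbx_msg() takes it
 * around prepare/send/wait), so at most one synchronous request is in
 * flight at a time.
 */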
static void
hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
	/*
	 * Init both matching scheme fields because we may not know which
	 * scheme will be used in the initial phase.
	 *
	 * It is also OK to init both matching scheme fields even after the
	 * exact scheme in use has been detected.
	 */
	hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
	hw->mbx_resp.head++;

	/* Update match_id and ensure the value of match_id is not zero */
	hw->mbx_resp.match_id++;
	if (hw->mbx_resp.match_id == 0)
		hw->mbx_resp.match_id = 1;
	hw->mbx_resp.received_match_resp = false;

	hw->mbx_resp.resp_status = 0;
	memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
}

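/*
 * Send a mailbox message to the PF. When need_resp is true the call is
 * synchronous: it blocks, polling via hns3_get_mbx_resp(), until the PF
 * answers or the adapter's mbx_time_limit_ms expires. Otherwise the message
 * is fire-and-forget.
 */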
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		  uint8_t *resp_data, uint16_t resp_len)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	bool is_ring_vector_msg;
	int offset;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
		hns3_err(hw,
			 "VF send mbx msg fail, msg len %u exceeds max payload len %d",
			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = code;
	is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_GET_RING_VECTOR_MAP);
	if (!is_ring_vector_msg)
		req->msg[1] = subcode;
	if (msg_data) {
		offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
		memcpy(&req->msg[offset], msg_data, msg_len);
	}

	/* synchronous send */
	if (need_resp) {
		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
		rte_spinlock_lock(&hw->mbx_resp.lock);
		hns3_mbx_prepare_resp(hw, code, subcode);
		req->match_id = hw->mbx_resp.match_id;
		ret = hns3_cmd_send(hw, &desc, 1);

		if (ret) {
			rte_spinlock_unlock(&hw->mbx_resp.lock);
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}

		ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
		rte_spinlock_unlock(&hw->mbx_resp.lock);
	} else {
		/* asynchronous send */
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}
	}

	return ret;
}

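/*
 * The CRQ (the PF-to-VF receive queue) is empty when the hardware write
 * pointer has caught up with the descriptor the driver will consume next.
 */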
static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
	uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

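/*
 * Parse a link-change notification from the PF. The 16-bit message words
 * carry: link status in msg_q[1], a 32-bit link speed starting at msg_q[2],
 * duplex in msg_q[4], and (in bit 0) the PF's "push link status" capability
 * in msg_q[5].
 */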
static void
hns3vf_handle_link_change_event(struct hns3_hw *hw,
				struct hns3_mbx_pf_to_vf_cmd *req)
{
	uint8_t link_status, link_duplex;
	uint16_t *msg_q = req->msg;
	uint8_t support_push_lsc;
	uint32_t link_speed;

	memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
	link_status = rte_le_to_cpu_16(msg_q[1]);
	link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
	hns3vf_update_link_status(hw, link_status, link_speed,
				  link_duplex);
	support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u;
	hns3vf_update_push_lsc_cap(hw, support_push_lsc);
}

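/*
 * The PF uses this message to hand the VF a reset level to schedule;
 * msg_q[1] encodes the hns3_reset_level value.
 */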
static void
hns3_handle_asserting_reset(struct hns3_hw *hw,
			    struct hns3_mbx_pf_to_vf_cmd *req)
{
	enum hns3_reset_level reset_level;
	uint16_t *msg_q = req->msg;

	/*
	 * PF has asserted reset hence VF should go in pending
	 * state and poll for the hardware reset status till it
	 * has been completely reset. After this stack should
	 * eventually be re-initialized.
	 */
	reset_level = rte_le_to_cpu_16(msg_q[1]);
	hns3_atomic_set_bit(reset_level, &hw->reset.pending);

	hns3_warn(hw, "PF inform reset level %d", reset_level);
	hw->reset.stats.request_cnt++;
	hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}

/*
 * Case1: a response is received after the timeout: req_msg_data has been
 *        cleared to 0 and does not equal resp_msg, so decrement lost.
 * Case2: the previous response arrives while a new send_mbx_msg is in
 *        progress: req_msg_data differs from resp_msg, so decrement lost
 *        and continue waiting for the new response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t tail = resp->tail + 1;

	if (tail > resp->head)
		tail = resp->head;
	if (resp->req_msg_data != resp_msg) {
		if (resp->lost)
			resp->lost--;
		hns3_warn(hw, "Received a mismatched response req_msg(%x) "
			  "resp_msg(%x) head(%u) tail(%u) lost(%u)",
			  resp->req_msg_data, resp_msg, resp->head, tail,
			  resp->lost);
	} else if (tail + resp->lost > resp->head) {
		resp->lost--;
		hns3_warn(hw, "Received a new response again resp_msg(%x) "
			  "head(%u) tail(%u) lost(%u)", resp_msg,
			  resp->head, tail, resp->lost);
	}
	rte_io_wmb();
	resp->tail = tail;
}

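/*
 * Handle a HNS3_MBX_PF_VF_RESP message. A non-zero match_id in the response
 * means the PF copied the request's match_id back, so the VF switches
 * (once) to the match-id scheme; a zero match_id falls back to the original
 * head/tail/lost accounting. The rte_io_wmb() ensures the response payload
 * is visible before the poller in hns3_get_mbx_resp() observes completion.
 */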
static void
hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t msg_data;

	if (req->match_id != 0) {
		/*
		 * If match_id is not zero, it means PF supports copying the
		 * request's match_id to its response. So VF could use the
		 * match_id to match the request.
		 */
		if (resp->matching_scheme !=
		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
			resp->matching_scheme =
				HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
			hns3_info(hw, "detect mailbox support match id!");
		}
		if (req->match_id == resp->match_id) {
			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
			memcpy(resp->additional_info, &req->msg[4],
			       HNS3_MBX_MAX_RESP_DATA_SIZE);
			rte_io_wmb();
			resp->received_match_resp = true;
		}
		return;
	}

	/*
	 * If the code below is reached, the PF does not support copying the
	 * request's match_id to its response, so the VF follows the original
	 * matching scheme.
	 */
	resp->resp_status = hns3_resp_to_errno(req->msg[3]);
	memcpy(resp->additional_info, &req->msg[4],
	       HNS3_MBX_MAX_RESP_DATA_SIZE);
	msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
	hns3_update_resp_position(hw, msg_data);
}

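/* Log a human-readable warning for a PF-reported link failure code. */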
static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
	switch (link_fail_code) {
	case HNS3_MBX_LF_NORMAL:
		break;
	case HNS3_MBX_LF_REF_CLOCK_LOST:
		hns3_warn(hw, "Reference clock lost!");
		break;
	case HNS3_MBX_LF_XSFP_TX_DISABLE:
		hns3_warn(hw, "SFP tx is disabled!");
		break;
	case HNS3_MBX_LF_XSFP_ABSENT:
		hns3_warn(hw, "SFP is absent!");
		break;
	default:
		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
		break;
	}
}

static void
hns3pf_handle_link_change_event(struct hns3_hw *hw,
				struct hns3_mbx_vf_to_pf_cmd *req)
{
#define LINK_STATUS_OFFSET     1
#define LINK_FAIL_CODE_OFFSET  2

	if (!req->msg[LINK_STATUS_OFFSET])
		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

	hns3_update_linkstatus_and_event(hw, true);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
				struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET	1
	uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
	/*
	 * Currently, hardware based on the hns3 network engine doesn't
	 * support more than two layers of VLAN offload; such packets would be
	 * lost or mis-tagged. If the hns3 PF kernel ethdev driver sets the
	 * PVID for a VF device after that VF has been initialized, the PF
	 * driver notifies the VF driver to update its PVID configuration
	 * state. The VF driver updates the state immediately so that VLAN
	 * processing in Tx and Rx stays correct, but in the window of this
	 * state transition, packet loss or packets with wrong VLAN tags may
	 * occur.
	 */
	if (hw->port_base_vlan_cfg.state != new_pvid_state) {
		hw->port_base_vlan_cfg.state = new_pvid_state;
		hns3_update_all_queues_pvid_proc_en(hw);
	}
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
	if (!promisc_en) {
		/*
		 * When promisc/allmulti mode is closed by the hns3 PF kernel
		 * ethdev driver for an untrusted VF, modify the VF's related
		 * status.
		 */
		hns3_warn(hw, "Promisc mode will be closed by host for being "
			      "untrusted.");
		hw->data->promiscuous = 0;
		hw->data->all_multicast = 0;
	}
}

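/*
 * Drain the CRQ from a thread other than the primary process's interrupt
 * thread. Only HNS3_MBX_PF_VF_RESP messages are handled here; their opcode
 * is cleared afterwards so the interrupt thread will not process the same
 * descriptor twice. See the comment in hns3_dev_handle_mbx_msg() for why
 * other message types must not be handled in this context.
 */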
static void
hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	uint32_t tail, next_to_use;
	uint8_t opcode;
	uint16_t flag;

	tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
	next_to_use = crq->next_to_use;
	while (next_to_use != tail) {
		desc = &crq->desc[next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
		opcode = req->msg[0] & 0xff;

		flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
		if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
			goto scan_next;

		if (crq->desc[next_to_use].opcode == 0)
			goto scan_next;

		if (opcode == HNS3_MBX_PF_VF_RESP) {
			hns3_handle_mbx_response(hw, req);
			/*
			 * Clear opcode to inform the intr thread not to
			 * process this message again. Note: clear the
			 * descriptor just handled (next_to_use), not
			 * crq->next_to_use, which still points at the start
			 * of the scanned range.
			 */
			crq->desc[next_to_use].opcode = 0;
		}

scan_next:
		next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
	}

	crq->next_to_use = next_to_use;
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
}

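/*
 * Process all pending mailbox messages. This runs both from the interrupt
 * thread in the primary process and from any thread polling for a
 * synchronous response via hns3_get_mbx_resp(), so it dispatches on process
 * type and thread context before touching the ring.
 */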
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	bool handle_out;
	uint8_t opcode;
	uint16_t flag;

	rte_spinlock_lock(&hw->cmq.crq.lock);

	handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY ||
		      !rte_thread_is_intr()) && hns->is_vf;
	if (handle_out) {
		/*
		 * Currently, any thread in the primary or a secondary process
		 * may send a mailbox sync request, so each such thread needs
		 * to process the crq message (which is the HNS3_MBX_PF_VF_RESP)
		 * in its own thread context. It may also process other
		 * messages because it uses the policy of processing all
		 * pending messages at once.
		 * But some messages such as HNS3_MBX_PUSH_LINK_STATUS can
		 * only be processed within the intr thread in the primary
		 * process, otherwise an lsc event may be reported in a
		 * secondary process.
		 * So threads other than the intr thread in the primary
		 * process only handle the HNS3_MBX_PF_VF_RESP message. Once a
		 * message is processed, its opcode is rewritten with zero so
		 * that the intr thread in the primary process will not
		 * process it again.
		 */
		hns3_handle_mbx_msg_out_intr(hw);
		rte_spinlock_unlock(&hw->cmq.crq.lock);
		return;
	}

	while (!hns3_cmd_crq_empty(hw)) {
		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			rte_spinlock_unlock(&hw->cmq.crq.lock);
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
		opcode = req->msg[0] & 0xff;

		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
			hns3_warn(hw,
				  "dropped invalid mailbox message, code = %u",
				  opcode);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		handle_out = hns->is_vf && desc->opcode == 0;
		if (handle_out) {
			/* Message already processed by other thread */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		switch (opcode) {
		case HNS3_MBX_PF_VF_RESP:
			hns3_handle_mbx_response(hw, req);
			break;
		case HNS3_MBX_LINK_STAT_CHANGE:
			hns3vf_handle_link_change_event(hw, req);
			break;
		case HNS3_MBX_ASSERTING_RESET:
			hns3_handle_asserting_reset(hw, req);
			break;
		case HNS3_MBX_PUSH_LINK_STATUS:
			/*
			 * This message is reported by the firmware and is
			 * reported in 'struct hns3_mbx_vf_to_pf_cmd' format.
			 * Therefore, we should cast the req variable to
			 * 'struct hns3_mbx_vf_to_pf_cmd' and then process it.
			 */
			hns3pf_handle_link_change_event(hw,
					(struct hns3_mbx_vf_to_pf_cmd *)req);
			break;
		case HNS3_MBX_PUSH_VLAN_INFO:
			/*
			 * When the PVID configuration status of VF device is
			 * changed by the hns3 PF kernel driver, VF driver will
			 * receive this mailbox message from PF driver.
			 */
			hns3_update_port_base_vlan_info(hw, req);
			break;
		case HNS3_MBX_PUSH_PROMISC_INFO:
			/*
			 * When the trust status of VF device changed by the
			 * hns3 PF kernel driver, VF driver will receive this
			 * mailbox message from PF driver.
			 */
			hns3_handle_promisc_info(hw, req->msg[1]);
			break;
		default:
			hns3_err(hw, "received unsupported(%u) mbx msg",
				 req->msg[0]);
			break;
		}

		crq->desc[crq->next_to_use].flag = 0;
		hns3_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer, IMP need this pointer */
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
}