net/hns3: fix VF handling LSC event in secondary process
drivers/net/hns3/hns3_mbx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET            2

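/*
 * The PF returns a Linux errno value in its response message; map it to the
 * corresponding negative DPDK errno, falling back to -EIO for unknown codes.
 */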
static const struct errno_respcode_map err_code_map[] = {
        {0, 0},
        {1, -EPERM},
        {2, -ENOENT},
        {5, -EIO},
        {11, -EAGAIN},
        {12, -ENOMEM},
        {16, -EBUSY},
        {22, -EINVAL},
        {28, -ENOSPC},
        {95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
        uint32_t i, num;

        num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
        for (i = 0; i < num; i++) {
                if (err_code_map[i].resp_code == resp_code)
                        return err_code_map[i].err_no;
        }

        return -EIO;
}

static void
hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
        if (hw->mbx_resp.matching_scheme ==
            HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) {
                hw->mbx_resp.lost++;
                hns3_err(hw,
                         "VF could not get mbx(%u,%u) head(%u) tail(%u) "
                         "lost(%u) from PF",
                         code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail,
                         hw->mbx_resp.lost);
                return;
        }

        hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode);
}

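/*
 * Wait for the PF's response to the request identified by (code, subcode):
 * poll for at most HNS3_MAX_RETRY_MS, processing pending mailbox messages in
 * the caller's own context every HNS3_WAIT_RESP_US until the response is
 * marked as received.
 */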
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS       500
#define HNS3_WAIT_RESP_US       100
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_mbx_resp_status *mbx_resp;
        bool received;
        uint64_t now;
        uint64_t end;

        if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
                hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
                         resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
                return -EINVAL;
        }

        now = get_timeofday_ms();
        end = now + HNS3_MAX_RETRY_MS;
        while (now < end) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hw->mbx_resp.req_msg_data = 0;
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "reset pending");
                        return -EIO;
                }

                hns3_dev_handle_mbx_msg(hw);
                rte_delay_us(HNS3_WAIT_RESP_US);

                if (hw->mbx_resp.matching_scheme ==
                    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL)
                        received = (hw->mbx_resp.head ==
                                    hw->mbx_resp.tail + hw->mbx_resp.lost);
                else
                        received = hw->mbx_resp.received_match_resp;
                if (received)
                        break;

                now = get_timeofday_ms();
        }
        hw->mbx_resp.req_msg_data = 0;
        if (now >= end) {
                hns3_mbx_proc_timeout(hw, code, subcode);
                return -ETIME;
        }
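        /*
         * This read barrier pairs with the write barrier in
         * hns3_handle_mbx_response()/hns3_update_resp_position() so that
         * resp_status and additional_info are only read after the response
         * has been seen as received.
         */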
        rte_io_rmb();
        mbx_resp = &hw->mbx_resp;

        if (mbx_resp->resp_status)
                return mbx_resp->resp_status;

        if (resp_data)
                memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

        return 0;
}

static void
hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
        /*
         * Init the fields of both matching schemes because we may not know
         * which scheme will be used during the initial phase.
         *
         * It is also harmless to init both sets of fields even after the
         * scheme in use has been detected.
         */
        hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
        hw->mbx_resp.head++;

        /* Update match_id and ensure the value of match_id is not zero */
        hw->mbx_resp.match_id++;
        if (hw->mbx_resp.match_id == 0)
                hw->mbx_resp.match_id = 1;
        hw->mbx_resp.received_match_resp = false;

        hw->mbx_resp.resp_status = 0;
        memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
}

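/*
 * Send a mailbox message to the PF. When need_resp is true, the send is
 * synchronous: the mbx_resp lock serializes concurrent requests, and the call
 * blocks in hns3_get_mbx_resp() until the PF answers or the wait times out.
 */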
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
                  uint8_t *resp_data, uint16_t resp_len)
{
        struct hns3_mbx_vf_to_pf_cmd *req;
        struct hns3_cmd_desc desc;
        bool is_ring_vector_msg;
        int offset;
        int ret;

        req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

        /* first two bytes are reserved for code & subcode */
        if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
                hns3_err(hw,
                         "VF send mbx msg fail, msg len %u exceeds max payload len %d",
                         msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
                return -EINVAL;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = code;
        is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_GET_RING_VECTOR_MAP);
        if (!is_ring_vector_msg)
                req->msg[1] = subcode;
        if (msg_data) {
                offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
                memcpy(&req->msg[offset], msg_data, msg_len);
        }

        /* synchronous send */
        if (need_resp) {
                req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
                rte_spinlock_lock(&hw->mbx_resp.lock);
                hns3_mbx_prepare_resp(hw, code, subcode);
                req->match_id = hw->mbx_resp.match_id;
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
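                        /*
                         * Roll back the head counter bumped in
                         * hns3_mbx_prepare_resp(): no response will arrive
                         * for a request that was never sent.
                         */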
                        hw->mbx_resp.head--;
                        rte_spinlock_unlock(&hw->mbx_resp.lock);
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }

                ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
                rte_spinlock_unlock(&hw->mbx_resp.lock);
        } else {
                /* asynchronous send */
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }
        }

        return ret;
}

static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
        uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

        return tail == hw->cmq.crq.next_to_use;
}

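/*
 * Layout of the link change message from the PF: msg[1] carries the link
 * status, msg[2..3] the link speed, msg[4] the duplex mode, and bit 0 of
 * msg[5] reports whether the PF can push link status changes to the VF.
 */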
static void
hns3vf_handle_link_change_event(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
        uint8_t link_status, link_duplex;
        uint16_t *msg_q = req->msg;
        uint8_t support_push_lsc;
        uint32_t link_speed;

        memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
        link_status = rte_le_to_cpu_16(msg_q[1]);
        link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
        hns3vf_update_link_status(hw, link_status, link_speed,
                                  link_duplex);
        support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u;
        hns3vf_update_push_lsc_cap(hw, support_push_lsc);
}

static void
hns3_handle_asserting_reset(struct hns3_hw *hw,
                            struct hns3_mbx_pf_to_vf_cmd *req)
{
        enum hns3_reset_level reset_level;
        uint16_t *msg_q = req->msg;

        /*
         * PF has asserted reset, so the VF should go into the pending state
         * and poll the hardware reset status until the reset has completed.
         * After that, the stack should eventually be re-initialized.
         */
        reset_level = rte_le_to_cpu_16(msg_q[1]);
        hns3_atomic_set_bit(reset_level, &hw->reset.pending);

        hns3_warn(hw, "PF informs reset level %d", reset_level);
        hw->reset.stats.request_cnt++;
        hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}

/*
 * Case1: a response arrives after the request has timed out; req_msg_data
 *        was cleared to 0 and no longer equals resp_msg, so decrement lost.
 * Case2: the previous response arrives while a new send_mbx_msg is pending;
 *        req_msg_data differs from resp_msg, so decrement lost and continue
 *        waiting for the matching response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t tail = resp->tail + 1;

        if (tail > resp->head)
                tail = resp->head;
        if (resp->req_msg_data != resp_msg) {
                if (resp->lost)
                        resp->lost--;
                hns3_warn(hw, "Received a mismatched response req_msg(%x) "
                          "resp_msg(%x) head(%u) tail(%u) lost(%u)",
                          resp->req_msg_data, resp_msg, resp->head, tail,
                          resp->lost);
        } else if (tail + resp->lost > resp->head) {
                resp->lost--;
                hns3_warn(hw, "Received a new response again resp_msg(%x) "
                          "head(%u) tail(%u) lost(%u)", resp_msg,
                          resp->head, tail, resp->lost);
        }
        rte_io_wmb();
        resp->tail = tail;
}

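/*
 * Two response-matching schemes coexist: a PF that copies the request's
 * match_id into its response selects the match_id scheme; a zero match_id
 * indicates an old PF, so fall back to the original head/tail/lost
 * accounting.
 */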
static void
hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t msg_data;

        if (req->match_id != 0) {
                /*
                 * If match_id is not zero, the PF supports copying the
                 * request's match_id into its response, so the VF can use
                 * match_id to match the request.
                 */
                if (resp->matching_scheme !=
                    HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
                        resp->matching_scheme =
                                HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
                        hns3_info(hw, "detect mailbox support match id!");
                }
                if (req->match_id == resp->match_id) {
                        resp->resp_status = hns3_resp_to_errno(req->msg[3]);
                        memcpy(resp->additional_info, &req->msg[4],
                               HNS3_MBX_MAX_RESP_DATA_SIZE);
                        rte_io_wmb();
                        resp->received_match_resp = true;
                }
                return;
        }

        /*
         * Reaching here means the PF does not support copying the request's
         * match_id into its response, so the VF follows the original scheme
         * to process the response.
         */
        resp->resp_status = hns3_resp_to_errno(req->msg[3]);
        memcpy(resp->additional_info, &req->msg[4],
               HNS3_MBX_MAX_RESP_DATA_SIZE);
        msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
        hns3_update_resp_position(hw, msg_data);
}

static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
        switch (link_fail_code) {
        case HNS3_MBX_LF_NORMAL:
                break;
        case HNS3_MBX_LF_REF_CLOCK_LOST:
                hns3_warn(hw, "Reference clock lost!");
                break;
        case HNS3_MBX_LF_XSFP_TX_DISABLE:
                hns3_warn(hw, "SFP tx is disabled!");
                break;
        case HNS3_MBX_LF_XSFP_ABSENT:
                hns3_warn(hw, "SFP is absent!");
                break;
        default:
                hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
                break;
        }
}

static void
hns3pf_handle_link_change_event(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
#define LINK_STATUS_OFFSET     1
#define LINK_FAIL_CODE_OFFSET  2

        if (!req->msg[LINK_STATUS_OFFSET])
                hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

        hns3_update_linkstatus_and_event(hw, true);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET       1
        uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
                HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
        /*
         * Currently, the hns3 network engine's hardware doesn't support more
         * than two layers of VLAN offload, which would cause packet loss or
         * corruption for such packets. If the hns3 PF kernel ethdev driver
         * sets the PVID for a VF device after the related VF device has been
         * initialized, the PF driver notifies the VF driver to update its
         * PVID configuration state. The VF driver updates that state
         * immediately to keep the VLAN processing in Tx and Rx correct.
         * However, during the window of this state transition, packet loss
         * or packets with a wrong VLAN may still occur.
         */
        if (hw->port_base_vlan_cfg.state != new_pvid_state) {
                hw->port_base_vlan_cfg.state = new_pvid_state;
                hns3_update_all_queues_pvid_proc_en(hw);
        }
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
        if (!promisc_en) {
                /*
                 * When promisc/allmulti mode is closed by the hns3 PF kernel
                 * ethdev driver because the VF is untrusted, update the VF's
                 * related status accordingly.
                 */
                hns3_warn(hw, "Promisc mode will be closed by host for being "
                              "untrusted.");
                hw->data->promiscuous = 0;
                hw->data->all_multicast = 0;
        }
}

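/*
 * Scan the CRQ from the software next_to_use up to the hardware tail without
 * consuming descriptors: only HNS3_MBX_PF_VF_RESP messages are handled here,
 * everything else is left to the intr thread in the primary process.
 */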
static void
hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        uint32_t tail, next_to_use;
        uint8_t opcode;
        uint16_t flag;

        tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
        next_to_use = crq->next_to_use;
        while (next_to_use != tail) {
                desc = &crq->desc[next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
                if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
                        goto scan_next;

                if (crq->desc[next_to_use].opcode == 0)
                        goto scan_next;

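                /*
                 * Response messages must be handled in the calling thread's
                 * own context so that the waiter in hns3_get_mbx_resp() can
                 * observe them.
                 */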
                if (opcode == HNS3_MBX_PF_VF_RESP) {
                        hns3_handle_mbx_response(hw, req);
                        /*
                         * Clear the opcode so the intr thread won't process
                         * this message again.
                         */
                        crq->desc[next_to_use].opcode = 0;
                }

scan_next:
                next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
        }
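        /*
         * Note: crq->next_to_use is deliberately not advanced here; the intr
         * thread still owns the ring, consumes every descriptor (skipping
         * those whose opcode was cleared above) and writes back the head
         * pointer.
         */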
}

void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        uint8_t opcode;
        uint16_t flag;

        rte_spinlock_lock(&hw->cmq.crq.lock);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
            !rte_thread_is_intr()) {
                /*
                 * Currently, any thread in the primary or a secondary process
                 * may send a mailbox sync request, so it must process the CRQ
                 * response message (HNS3_MBX_PF_VF_RESP) in its own thread
                 * context. It may also process other messages because it uses
                 * the policy of processing all pending messages at once.
                 * But some messages, such as HNS3_MBX_PUSH_LINK_STATUS, may
                 * only be processed in the intr thread of the primary
                 * process; otherwise an lsc event could be reported in a
                 * secondary process.
                 * So threads other than the intr thread in the primary
                 * process only process the HNS3_MBX_PF_VF_RESP message; once
                 * a message is processed, its opcode is rewritten to zero so
                 * that the intr thread in the primary process won't process
                 * it again.
                 */
                hns3_handle_mbx_msg_out_intr(hw);
                rte_spinlock_unlock(&hw->cmq.crq.lock);
                return;
        }

        while (!hns3_cmd_crq_empty(hw)) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        rte_spinlock_unlock(&hw->cmq.crq.lock);
                        return;
                }

                desc = &crq->desc[crq->next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
                        hns3_warn(hw,
                                  "dropped invalid mailbox message, code = %u",
                                  opcode);

                        /* dropping/not processing this invalid message */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                if (desc->opcode == 0) {
                        /* Message already processed by another thread */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                switch (opcode) {
                case HNS3_MBX_PF_VF_RESP:
                        hns3_handle_mbx_response(hw, req);
                        break;
                case HNS3_MBX_LINK_STAT_CHANGE:
                        hns3vf_handle_link_change_event(hw, req);
                        break;
                case HNS3_MBX_ASSERTING_RESET:
                        hns3_handle_asserting_reset(hw, req);
                        break;
                case HNS3_MBX_PUSH_LINK_STATUS:
                        hns3pf_handle_link_change_event(hw, req);
                        break;
                case HNS3_MBX_PUSH_VLAN_INFO:
                        /*
                         * When the PVID configuration state of the VF device
                         * is changed by the hns3 PF kernel driver, the VF
                         * driver receives this mailbox message from the PF
                         * driver.
                         */
                        hns3_update_port_base_vlan_info(hw, req);
                        break;
                case HNS3_MBX_PUSH_PROMISC_INFO:
                        /*
                         * When the trust status of the VF device is changed
                         * by the hns3 PF kernel driver, the VF driver
                         * receives this mailbox message from the PF driver.
                         */
                        hns3_handle_promisc_info(hw, req->msg[1]);
                        break;
                default:
                        hns3_err(hw,
                                 "VF received unsupported(%u) mbx msg from PF",
                                 req->msg[0]);
                        break;
                }

                crq->desc[crq->next_to_use].flag = 0;
                hns3_mbx_ring_ptr_move_crq(crq);
        }

        /* Write back the CMDQ_RQ head pointer; the IMP needs this pointer */
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);

        rte_spinlock_unlock(&hw->cmq.crq.lock);
}