dpdk.git: drivers/net/hns3/hns3_mbx.c (commit 34c8c688fcaec3c84cbf3bc4c24353feebe176b5)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_spinlock.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"

#define HNS3_CMD_CODE_OFFSET		2

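/*
 * Response codes returned by the PF follow the Linux errno numbering
 * (1 = EPERM, 2 = ENOENT, ...). This table converts them to the negative
 * errno values used by DPDK; unknown codes fall back to -EIO in
 * hns3_resp_to_errno().
 */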
static const struct errno_respcode_map err_code_map[] = {
	{0, 0},
	{1, -EPERM},
	{2, -ENOENT},
	{5, -EIO},
	{11, -EAGAIN},
	{12, -ENOMEM},
	{16, -EBUSY},
	{22, -EINVAL},
	{28, -ENOSPC},
	{95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
	uint32_t i, num;

	num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
	for (i = 0; i < num; i++) {
		if (err_code_map[i].resp_code == resp_code)
			return err_code_map[i].err_no;
	}

	return -EIO;
}

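/*
 * Poll mailbox responses for every hns3/hns3vf port that still has a
 * synchronous request outstanding. Used when a mailbox message is sent
 * from the interrupt thread itself: that thread cannot sleep waiting for
 * its own handler, so it processes the pending responses directly.
 */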
static void
hns3_poll_all_sync_msg(void)
{
	struct rte_eth_dev *eth_dev;
	struct hns3_adapter *adapter;
	const char *name;
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		eth_dev = &rte_eth_devices[port_id];
		name = eth_dev->device->driver->name;
		if (strcmp(name, "net_hns3") && strcmp(name, "net_hns3_vf"))
			continue;
		adapter = eth_dev->data->dev_private;
		if (!adapter || adapter->hw.adapter_state == HNS3_NIC_CLOSED)
			continue;
		/* a pending synchronous msg has a non-zero mbx_resp.req_msg_data */
		if (adapter->hw.mbx_resp.req_msg_data)
			hns3_dev_handle_mbx_msg(&adapter->hw);
	}
}

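/*
 * Wait for the PF's reply to a synchronous mailbox request.
 *
 * The head/tail/lost counters in hw->mbx_resp track the handshake: head is
 * incremented when a synchronous request is sent, tail when a matching
 * response arrives (see hns3_update_resp_position()) and lost when a
 * request times out. The loop below therefore spins until
 * head == tail + lost, or until HNS3_MAX_RETRY_MS elapses.
 */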
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
		  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS	500
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mbx_resp_status *mbx_resp;
	bool in_irq = false;
	uint64_t now;
	uint64_t end;

	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
		hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)",
			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	now = get_timeofday_ms();
	end = now + HNS3_MAX_RETRY_MS;
	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
	       (now < end)) {
		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw, "Don't wait for mbx response because of "
				 "disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hw->mbx_resp.req_msg_data = 0;
			hns3_err(hw, "Don't wait for mbx response because of "
				 "reset pending");
			return -EIO;
		}

		/*
		 * Mailbox responses are processed on the interrupt thread.
		 * A mailbox sent from that thread cannot block waiting for
		 * its own handler, so poll the pending responses directly
		 * instead of sleeping.
		 */
		if (pthread_equal(hw->irq_thread_id, pthread_self())) {
			in_irq = true;
			hns3_poll_all_sync_msg();
		} else {
			rte_delay_ms(HNS3_POLL_RESPONE_MS);
		}
		now = get_timeofday_ms();
	}
	hw->mbx_resp.req_msg_data = 0;
	if (now >= end) {
		hw->mbx_resp.lost++;
		hns3_err(hw,
			 "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d",
			 code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
			 hw->mbx_resp.lost, in_irq);
		return -ETIME;
	}
	rte_io_rmb();
	mbx_resp = &hw->mbx_resp;

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	return 0;
}

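/*
 * Build and send a VF-to-PF mailbox message. The first two bytes of the
 * message carry code and subcode (except for the ring/vector mapping
 * commands, which only reserve one byte), followed by up to
 * HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET bytes of payload. When
 * need_resp is true the call is synchronous: it takes mbx_resp.lock,
 * records the expected (code, subcode) pair in req_msg_data and waits for
 * the PF's answer via hns3_get_mbx_resp().
 */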
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		  uint8_t *resp_data, uint16_t resp_len)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	bool is_ring_vector_msg;
	int offset;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
		hns3_err(hw,
			 "VF send mbx msg fail, msg len %d exceeds max payload len %d",
			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = code;
	is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_GET_RING_VECTOR_MAP);
	if (!is_ring_vector_msg)
		req->msg[1] = subcode;
	if (msg_data) {
		offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
		memcpy(&req->msg[offset], msg_data, msg_len);
	}

	/* synchronous send */
	if (need_resp) {
		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
		rte_spinlock_lock(&hw->mbx_resp.lock);
		hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
		hw->mbx_resp.head++;
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			rte_spinlock_unlock(&hw->mbx_resp.lock);
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}

		ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
		rte_spinlock_unlock(&hw->mbx_resp.lock);
	} else {
		/* asynchronous send */
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}
	}

	return ret;
}

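/*
 * Illustrative use only (the real callers live elsewhere in the driver,
 * and HNS3_MBX_GET_QINFO is assumed here purely as an example code): a VF
 * would typically issue a synchronous query and read the reply into a
 * local buffer,
 *
 *	uint8_t resp[HNS3_MBX_MAX_RESP_DATA_SIZE];
 *	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0,
 *				true, resp, sizeof(resp));
 *
 * whereas fire-and-forget notifications pass need_resp == false and no
 * response buffer.
 */
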
static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
	uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

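/*
 * Drain the asynchronous message queue (ARQ). Messages placed there by
 * hns3_dev_handle_mbx_msg() are either link status updates from the PF or
 * a notification that the PF is asserting a reset, in which case the
 * reported reset level is marked pending and a reset is scheduled.
 */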
static void
hns3_mbx_handler(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	enum hns3_reset_level reset_level;
	uint16_t *msg_q;
	uint8_t opcode;
	uint32_t tail;

	tail = hw->arq.tail;

	/* process all the async queue messages */
	while (tail != hw->arq.head) {
		msg_q = hw->arq.msg_q[hw->arq.head];

		opcode = msg_q[0] & 0xff;
		switch (opcode) {
		case HNS3_MBX_LINK_STAT_CHANGE:
			memcpy(&mac->link_speed, &msg_q[2],
			       sizeof(mac->link_speed));
			mac->link_status = rte_le_to_cpu_16(msg_q[1]);
			mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
			break;
		case HNS3_MBX_ASSERTING_RESET:
			/* The PF has asserted a reset: mark the reported
			 * reset level as pending so the VF polls the hardware
			 * reset status until the reset completes, after which
			 * the stack is re-initialized.
			 */
			reset_level = rte_le_to_cpu_16(msg_q[1]);
			hns3_atomic_set_bit(reset_level, &hw->reset.pending);

			hns3_warn(hw, "PF inform reset level %d", reset_level);
			hw->reset.stats.request_cnt++;
			hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
			break;
		default:
			hns3_err(hw, "Fetched unsupported(%d) message from arq",
				 opcode);
			break;
		}

		hns3_mbx_head_ptr_move_arq(hw->arq);
		msg_q = hw->arq.msg_q[hw->arq.head];
	}
}

/*
 * Case 1: a response arrives after its request has already timed out.
 *         req_msg_data was cleared to 0, so it no longer matches resp_msg;
 *         decrement lost to undo the timeout accounting.
 * Case 2: the response to a previous request arrives while a new
 *         send_mbx_msg() is in flight. req_msg_data differs from resp_msg;
 *         decrement lost and keep waiting for the new response.
 */
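/*
 * For example, after a timeout we may have head = 3, tail = 2, lost = 1
 * and req_msg_data = 0. When the late response finally shows up it no
 * longer matches req_msg_data, so lost drops back to 0 and tail advances
 * to 3, restoring head == tail + lost.
 */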
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t tail = resp->tail + 1;

	if (tail > resp->head)
		tail = resp->head;
	if (resp->req_msg_data != resp_msg) {
		if (resp->lost)
			resp->lost--;
		hns3_warn(hw, "Received a mismatched response req_msg(%x) "
			  "resp_msg(%x) head(%d) tail(%d) lost(%d)",
			  resp->req_msg_data, resp_msg, resp->head, tail,
			  resp->lost);
	} else if (tail + resp->lost > resp->head) {
		resp->lost--;
		hns3_warn(hw, "Received a new response again resp_msg(%x) "
			  "head(%d) tail(%d) lost(%d)", resp_msg,
			  resp->head, tail, resp->lost);
	}
	rte_io_wmb();
	resp->tail = tail;
}

static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
	switch (link_fail_code) {
	case HNS3_MBX_LF_NORMAL:
		break;
	case HNS3_MBX_LF_REF_CLOCK_LOST:
		hns3_warn(hw, "Reference clock lost!");
		break;
	case HNS3_MBX_LF_XSFP_TX_DISABLE:
		hns3_warn(hw, "SFP tx is disabled!");
		break;
	case HNS3_MBX_LF_XSFP_ABSENT:
		hns3_warn(hw, "SFP is absent!");
		break;
	default:
		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
		break;
	}
}

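/*
 * Handle a HNS3_MBX_PUSH_LINK_STATUS message from the PF: byte 1 of the
 * message carries the link status and, when the link is down, byte 2
 * carries the failure reason decoded by hns3_link_fail_parse().
 */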
static void
hns3_handle_link_change_event(struct hns3_hw *hw,
			      struct hns3_mbx_pf_to_vf_cmd *req)
{
#define LINK_STATUS_OFFSET     1
#define LINK_FAIL_CODE_OFFSET  2

	if (!req->msg[LINK_STATUS_OFFSET])
		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

	hns3_update_link_status(hw);
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
	if (!promisc_en) {
		/*
		 * When promiscuous/allmulti mode is disabled by the hns3 PF
		 * kernel ethdev driver because the VF is untrusted, update
		 * the VF's corresponding status.
		 */
		hns3_warn(hw, "Promisc mode will be closed by host for being "
			      "untrusted.");
		hw->data->promiscuous = 0;
		hw->data->all_multicast = 0;
	}
}

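/*
 * Process the command receive queue (CRQ). Each valid descriptor is
 * dispatched by opcode: PF_VF_RESP completes a pending synchronous
 * request, link change and reset notifications are copied into the ARQ
 * and handled by hns3_mbx_handler(), and promiscuous/trust updates are
 * applied directly. Finally the CRQ head pointer is written back so the
 * firmware (IMP) knows which descriptors have been consumed.
 */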
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	uint32_t msg_data;
	uint16_t *msg_q;
	uint8_t opcode;
	uint16_t flag;
	uint8_t *temp;
	int i;

	while (!hns3_cmd_crq_empty(hw)) {
		if (rte_atomic16_read(&hw->reset.disable_cmd))
			return;

		desc = &crq->desc[crq->next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
		opcode = req->msg[0] & 0xff;

		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
			hns3_warn(hw,
				  "dropped invalid mailbox message, code = %d",
				  opcode);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		switch (opcode) {
		case HNS3_MBX_PF_VF_RESP:
			resp->resp_status = hns3_resp_to_errno(req->msg[3]);

			temp = (uint8_t *)&req->msg[4];
			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
				resp->additional_info[i] = *temp;
				temp++;
			}
			msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
			hns3_update_resp_position(hw, msg_data);
			break;
		case HNS3_MBX_LINK_STAT_CHANGE:
		case HNS3_MBX_ASSERTING_RESET:
			msg_q = hw->arq.msg_q[hw->arq.tail];
			memcpy(&msg_q[0], req->msg,
			       HNS3_MBX_MAX_ARQ_MSG_SIZE * sizeof(uint16_t));
			hns3_mbx_tail_ptr_move_arq(hw->arq);

			hns3_mbx_handler(hw);
			break;
		case HNS3_MBX_PUSH_LINK_STATUS:
			hns3_handle_link_change_event(hw, req);
			break;
		case HNS3_MBX_PUSH_PROMISC_INFO:
			/*
			 * The VF receives this mailbox message from the PF
			 * driver when the hns3 PF kernel driver changes the
			 * trust status of the VF device.
			 */
			hns3_handle_promisc_info(hw, req->msg[1]);
			break;
		default:
			hns3_err(hw,
				 "VF received unsupported(%d) mbx msg from PF",
				 req->msg[0]);
			break;
		}

		crq->desc[crq->next_to_use].flag = 0;
		hns3_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back the CMDQ_RQ head pointer; the IMP needs it */
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
}