net/hns3: fix copyright date
drivers/net/hns3/hns3_mbx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET            2

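/*
 * Map response codes returned by the PF (positive Linux errno values)
 * to the negative errno values used by DPDK.
 */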
static const struct errno_respcode_map err_code_map[] = {
        {0, 0},
        {1, -EPERM},
        {2, -ENOENT},
        {5, -EIO},
        {11, -EAGAIN},
        {12, -ENOMEM},
        {16, -EBUSY},
        {22, -EINVAL},
        {28, -ENOSPC},
        {95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
        uint32_t i, num;

        num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
        for (i = 0; i < num; i++) {
                if (err_code_map[i].resp_code == resp_code)
                        return err_code_map[i].err_no;
        }

        return -EIO;
}

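/*
 * Poll the mailbox message queue of every hns3/hns3vf port that still has
 * a synchronous request outstanding (mbx_resp.req_msg_data is non-zero).
 * Needed when a mailbox is sent from the interrupt thread, which cannot
 * sleep waiting for a response it would itself deliver.
 */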
static void
hns3_poll_all_sync_msg(void)
{
        struct rte_eth_dev *eth_dev;
        struct hns3_adapter *adapter;
        const char *name;
        uint16_t port_id;

        RTE_ETH_FOREACH_DEV(port_id) {
                eth_dev = &rte_eth_devices[port_id];
                name = eth_dev->device->driver->name;
                if (strcmp(name, "net_hns3") && strcmp(name, "net_hns3_vf"))
                        continue;
                adapter = eth_dev->data->dev_private;
                if (!adapter || adapter->hw.adapter_state == HNS3_NIC_CLOSED)
                        continue;
                /* Synchronous msg, the mbx_resp.req_msg_data is non-zero */
                if (adapter->hw.mbx_resp.req_msg_data)
                        hns3_dev_handle_mbx_msg(&adapter->hw);
        }
}

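/*
 * Wait up to HNS3_MAX_RETRY_MS for the PF's response to the mailbox
 * message identified by (code0, code1), then copy up to resp_len bytes
 * of additional response data into resp_data.
 */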
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
                  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS       500
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_mbx_resp_status *mbx_resp;
        bool in_irq = false;
        uint64_t now;
        uint64_t end;

        if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
                hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
                         resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
                return -EINVAL;
        }

        now = get_timeofday_ms();
        end = now + HNS3_MAX_RETRY_MS;
        while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
               (now < end)) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hw->mbx_resp.req_msg_data = 0;
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "reset pending");
                        return -EIO;
                }

                /*
                 * Mailbox responses are delivered on the interrupt thread.
                 * A mailbox sent from that thread cannot sleep waiting for
                 * its own response, so poll the mailbox queues directly.
                 */
                if (pthread_equal(hw->irq_thread_id, pthread_self())) {
                        in_irq = true;
                        hns3_poll_all_sync_msg();
                } else {
                        rte_delay_ms(HNS3_POLL_RESPONE_MS);
                }
                now = get_timeofday_ms();
        }
        hw->mbx_resp.req_msg_data = 0;
        if (now >= end) {
                hw->mbx_resp.lost++;
                hns3_err(hw,
                         "VF could not get mbx(%u,%u) head(%u) tail(%u) lost(%u) from PF in_irq:%d",
                         code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
                         hw->mbx_resp.lost, in_irq);
                return -ETIME;
        }
        rte_io_rmb();
        mbx_resp = &hw->mbx_resp;

        if (mbx_resp->resp_status)
                return mbx_resp->resp_status;

        if (resp_data)
                memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

        return 0;
}

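/*
 * Send a mailbox message from VF to PF. When need_resp is true the send
 * is synchronous: mbx_resp.lock serializes the request and the call
 * blocks in hns3_get_mbx_resp() until the PF replies or the wait times
 * out; otherwise the message is sent without waiting for a response.
 */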
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
                  uint8_t *resp_data, uint16_t resp_len)
{
        struct hns3_mbx_vf_to_pf_cmd *req;
        struct hns3_cmd_desc desc;
        bool is_ring_vector_msg;
        int offset;
        int ret;

        req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

        /* first two bytes are reserved for code & subcode */
        if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
                hns3_err(hw,
                         "VF send mbx msg fail, msg len %u exceeds max payload len %d",
                         msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
                return -EINVAL;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = code;
        is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_GET_RING_VECTOR_MAP);
        if (!is_ring_vector_msg)
                req->msg[1] = subcode;
        if (msg_data) {
                offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
                memcpy(&req->msg[offset], msg_data, msg_len);
        }

        /* synchronous send */
        if (need_resp) {
                req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
                rte_spinlock_lock(&hw->mbx_resp.lock);
                hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
                hw->mbx_resp.head++;
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        rte_spinlock_unlock(&hw->mbx_resp.lock);
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }

                ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
                rte_spinlock_unlock(&hw->mbx_resp.lock);
        } else {
                /* asynchronous send */
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }
        }

        return ret;
}

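/* The CRQ is empty when the hardware tail has caught up with next_to_use. */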
static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
        uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

        return tail == hw->cmq.crq.next_to_use;
}

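/*
 * Drain the asynchronous receive queue (ARQ): handle the link status
 * change and reset-asserting messages queued by hns3_dev_handle_mbx_msg().
 */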
static void
hns3_mbx_handler(struct hns3_hw *hw)
{
        enum hns3_reset_level reset_level;
        uint8_t link_status, link_duplex;
        uint32_t link_speed;
        uint16_t *msg_q;
        uint8_t opcode;
        uint32_t tail;

        tail = hw->arq.tail;

        /* process all the async queue messages */
        while (tail != hw->arq.head) {
                msg_q = hw->arq.msg_q[hw->arq.head];

                opcode = msg_q[0] & 0xff;
                switch (opcode) {
                case HNS3_MBX_LINK_STAT_CHANGE:
                        memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
                        link_status = rte_le_to_cpu_16(msg_q[1]);
                        link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
                        hns3vf_update_link_status(hw, link_status, link_speed,
                                                  link_duplex);
                        break;
                case HNS3_MBX_ASSERTING_RESET:
                        /* PF has asserted reset, so the VF enters the pending
                         * state and polls the hardware reset status until the
                         * reset completes; the stack is then eventually
                         * re-initialized.
                         */
                        reset_level = rte_le_to_cpu_16(msg_q[1]);
                        hns3_atomic_set_bit(reset_level, &hw->reset.pending);

                        hns3_warn(hw, "PF inform reset level %d", reset_level);
                        hw->reset.stats.request_cnt++;
                        hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
                        break;
                default:
                        hns3_err(hw, "Fetched unsupported(%u) message from arq",
                                 opcode);
                        break;
                }

                hns3_mbx_head_ptr_move_arq(hw->arq);
        }
}

/*
 * Case 1: a response arrives after the timeout; req_msg_data has been
 *         cleared to 0 and does not match resp_msg, so decrement lost.
 * Case 2: the previous response arrives while a new send_mbx_msg() is in
 *         flight; req_msg_data differs from resp_msg, so decrement lost
 *         and keep waiting for the matching response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t tail = resp->tail + 1;

        if (tail > resp->head)
                tail = resp->head;
        if (resp->req_msg_data != resp_msg) {
                if (resp->lost)
                        resp->lost--;
                hns3_warn(hw, "Received a mismatched response req_msg(%x) "
                          "resp_msg(%x) head(%u) tail(%u) lost(%u)",
                          resp->req_msg_data, resp_msg, resp->head, tail,
                          resp->lost);
        } else if (tail + resp->lost > resp->head) {
                resp->lost--;
                hns3_warn(hw, "Received a new response again resp_msg(%x) "
                          "head(%u) tail(%u) lost(%u)", resp_msg,
                          resp->head, tail, resp->lost);
        }
        rte_io_wmb();
        resp->tail = tail;
}

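/* Log the reason the PF reported for the link going down. */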
static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
        switch (link_fail_code) {
        case HNS3_MBX_LF_NORMAL:
                break;
        case HNS3_MBX_LF_REF_CLOCK_LOST:
                hns3_warn(hw, "Reference clock lost!");
                break;
        case HNS3_MBX_LF_XSFP_TX_DISABLE:
                hns3_warn(hw, "SFP tx is disabled!");
                break;
        case HNS3_MBX_LF_XSFP_ABSENT:
                hns3_warn(hw, "SFP is absent!");
                break;
        default:
                hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
                break;
        }
}

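/*
 * Handle HNS3_MBX_PUSH_LINK_STATUS from the PF: if the link is down, log
 * the PF-reported failure reason, then refresh the link status and report
 * any change to the application.
 */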
static void
hns3_handle_link_change_event(struct hns3_hw *hw,
                              struct hns3_mbx_pf_to_vf_cmd *req)
{
#define LINK_STATUS_OFFSET     1
#define LINK_FAIL_CODE_OFFSET  2

        if (!req->msg[LINK_STATUS_OFFSET])
                hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

        hns3_update_link_status_and_event(hw);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET       1
        uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
                HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
        /*
         * The hns3 network engine does not support offloading more than two
         * VLAN layers; such packets may be dropped or carry a wrong VLAN tag.
         * If the hns3 PF kernel ethdev driver sets the PVID of a VF device
         * after that device has been initialized, the PF driver notifies the
         * VF driver to update its PVID configuration state. The VF driver
         * updates the state immediately so that VLAN processing in Tx and Rx
         * stays correct, but packet loss or wrongly tagged packets may still
         * occur during the window of this state transition.
         */
        if (hw->port_base_vlan_cfg.state != new_pvid_state) {
                hw->port_base_vlan_cfg.state = new_pvid_state;
                hns3_update_all_queues_pvid_proc_en(hw);
        }
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
        if (!promisc_en) {
                /*
                 * When the hns3 PF kernel ethdev driver disables
                 * promiscuous/allmulti mode for an untrusted VF, update the
                 * VF's view of that status accordingly.
                 */
                hns3_warn(hw, "Promisc mode will be closed by host for being "
                              "untrusted.");
                hw->data->promiscuous = 0;
                hw->data->all_multicast = 0;
        }
}

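/*
 * Mailbox processing entry point: drain the command receive queue (CRQ),
 * dispatch each PF-to-VF message, and finally write the consumed head
 * pointer back to hardware (the IMP needs this pointer).
 */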
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        uint32_t msg_data;
        uint16_t *msg_q;
        uint8_t opcode;
        uint16_t flag;
        uint8_t *temp;
        int i;

        while (!hns3_cmd_crq_empty(hw)) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
                        return;

                desc = &crq->desc[crq->next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
                        hns3_warn(hw,
                                  "dropped invalid mailbox message, code = %u",
                                  opcode);

                        /* dropping/not processing this invalid message */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                switch (opcode) {
                case HNS3_MBX_PF_VF_RESP:
                        resp->resp_status = hns3_resp_to_errno(req->msg[3]);

                        temp = (uint8_t *)&req->msg[4];
                        for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
                                resp->additional_info[i] = *temp;
                                temp++;
                        }
                        msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
                        hns3_update_resp_position(hw, msg_data);
                        break;
                case HNS3_MBX_LINK_STAT_CHANGE:
                case HNS3_MBX_ASSERTING_RESET:
                        msg_q = hw->arq.msg_q[hw->arq.tail];
                        memcpy(&msg_q[0], req->msg,
                               HNS3_MBX_MAX_ARQ_MSG_SIZE * sizeof(uint16_t));
                        hns3_mbx_tail_ptr_move_arq(hw->arq);

                        hns3_mbx_handler(hw);
                        break;
                case HNS3_MBX_PUSH_LINK_STATUS:
                        hns3_handle_link_change_event(hw, req);
                        break;
                case HNS3_MBX_PUSH_VLAN_INFO:
                        /*
                         * When the PVID configuration state of a VF device is
                         * changed by the hns3 PF kernel driver, the VF driver
                         * receives this mailbox message from the PF driver.
                         */
                        hns3_update_port_base_vlan_info(hw, req);
                        break;
                case HNS3_MBX_PUSH_PROMISC_INFO:
                        /*
                         * When the trust status of a VF device is changed by
                         * the hns3 PF kernel driver, the VF driver receives
                         * this mailbox message from the PF driver.
                         */
                        hns3_handle_promisc_info(hw, req->msg[1]);
                        break;
                default:
                        hns3_err(hw,
                                 "VF received unsupported(%u) mbx msg from PF",
                                 opcode);
                        break;
                }

                crq->desc[crq->next_to_use].flag = 0;
                hns3_mbx_ring_ptr_move_crq(crq);
        }

        /* Write back the CMDQ_RQ head pointer; the IMP needs this pointer */
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
}