drivers/net/hns3/hns3_mbx.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_spinlock.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET            2
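
/*
 * Response codes carried in a PF-to-VF reply mirror the kernel errno values
 * of the failure (e.g. 1 -> EPERM, 22 -> EINVAL); map them back to negative
 * errno codes for the caller. Unknown codes fall back to -EIO.
 */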
static const struct errno_respcode_map err_code_map[] = {
        {0, 0},
        {1, -EPERM},
        {2, -ENOENT},
        {5, -EIO},
        {11, -EAGAIN},
        {12, -ENOMEM},
        {16, -EBUSY},
        {22, -EINVAL},
        {28, -ENOSPC},
        {95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
        uint32_t i, num;

        num = RTE_DIM(err_code_map);
        for (i = 0; i < num; i++) {
                if (err_code_map[i].resp_code == resp_code)
                        return err_code_map[i].err_no;
        }

        return -EIO;
}
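
/*
 * Poll the mailbox response queue of every hns3/hns3vf port that is waiting
 * on a synchronous message. Used when a mailbox is sent from the interrupt
 * thread itself and therefore cannot sleep waiting for the response.
 */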
static void
hns3_poll_all_sync_msg(void)
{
        struct rte_eth_dev *eth_dev;
        struct hns3_adapter *adapter;
        const char *name;
        uint16_t port_id;

        RTE_ETH_FOREACH_DEV(port_id) {
                eth_dev = &rte_eth_devices[port_id];
                name = eth_dev->device->driver->name;
                if (strcmp(name, "net_hns3") && strcmp(name, "net_hns3_vf"))
                        continue;
                adapter = eth_dev->data->dev_private;
                if (!adapter || adapter->hw.adapter_state == HNS3_NIC_CLOSED)
                        continue;
                /* Synchronous msg, the mbx_resp.req_msg_data is non-zero */
                if (adapter->hw.mbx_resp.req_msg_data)
                        hns3_dev_handle_mbx_msg(&adapter->hw);
        }
}
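
/*
 * Wait for the PF's reply to a synchronous mailbox message identified by
 * (code0, code1), for at most HNS3_MAX_RETRY_MS, and copy up to resp_len
 * bytes of the reply into resp_data.
 */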
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
                  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS       500
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_mbx_resp_status *mbx_resp;
        bool in_irq = false;
        uint64_t now;
        uint64_t end;

        if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
                hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)",
                         resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
                return -EINVAL;
        }

        now = get_timeofday_ms();
        end = now + HNS3_MAX_RETRY_MS;
        while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
               (now < end)) {
                if (rte_atomic16_read(&hw->reset.disable_cmd)) {
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hw->mbx_resp.req_msg_data = 0;
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "reset pending");
                        return -EIO;
                }

                /*
                 * Mailbox responses are handled on the interrupt thread.
                 * A mailbox sent from the interrupt thread itself cannot
                 * sleep waiting for its response, so poll the pending
                 * responses directly instead.
                 */
                if (pthread_equal(hw->irq_thread_id, pthread_self())) {
                        in_irq = true;
                        hns3_poll_all_sync_msg();
                } else {
                        rte_delay_ms(HNS3_POLL_RESPONE_MS);
                }
                now = get_timeofday_ms();
        }
        hw->mbx_resp.req_msg_data = 0;
        if (now >= end) {
                hw->mbx_resp.lost++;
                hns3_err(hw,
                         "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d",
                         code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
                         hw->mbx_resp.lost, in_irq);
                return -ETIME;
        }
        rte_io_rmb();
        mbx_resp = &hw->mbx_resp;

        if (mbx_resp->resp_status)
                return mbx_resp->resp_status;

        if (resp_data)
                memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

        return 0;
}
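
/*
 * Send a mailbox message from VF to PF. When need_resp is true the send is
 * synchronous: the call blocks under mbx_resp.lock until the PF's reply
 * arrives or hns3_get_mbx_resp() times out; otherwise the message is sent
 * asynchronously and the function returns as soon as it is posted.
 */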
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
                  uint8_t *resp_data, uint16_t resp_len)
{
        struct hns3_mbx_vf_to_pf_cmd *req;
        struct hns3_cmd_desc desc;
        bool is_ring_vector_msg;
        int offset;
        int ret;

        req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

        /* first two bytes are reserved for code & subcode */
        if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
                hns3_err(hw,
                         "VF send mbx msg fail, msg len %d exceeds max payload len %d",
                         msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
                return -EINVAL;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = code;
        is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_GET_RING_VECTOR_MAP);
        if (!is_ring_vector_msg)
                req->msg[1] = subcode;
        if (msg_data) {
                offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
                memcpy(&req->msg[offset], msg_data, msg_len);
        }

        /* synchronous send */
        if (need_resp) {
                req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
                rte_spinlock_lock(&hw->mbx_resp.lock);
                hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
                hw->mbx_resp.head++;
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        rte_spinlock_unlock(&hw->mbx_resp.lock);
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }

                ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
                rte_spinlock_unlock(&hw->mbx_resp.lock);
        } else {
                /* asynchronous send */
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }
        }

        return ret;
}
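
/*
 * A minimal usage sketch (illustration only, not part of this file): a
 * synchronous query of queue information from the PF, assuming the
 * HNS3_MBX_GET_QINFO opcode as used by the hns3 VF driver:
 *
 *	uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
 *	int ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0,
 *				    true, resp_msg, sizeof(resp_msg));
 *	if (ret == 0)
 *		;	/* resp_msg now holds the PF's reply payload */
 */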

static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
        uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

        return tail == hw->cmq.crq.next_to_use;
}
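
/*
 * Drain the asynchronous message queue (ARQ) filled by
 * hns3_dev_handle_mbx_msg(): handle link status changes pushed by the PF
 * and reset requests asserted by the PF.
 */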
static void
hns3_mbx_handler(struct hns3_hw *hw)
{
        struct hns3_mac *mac = &hw->mac;
        enum hns3_reset_level reset_level;
        uint16_t *msg_q;
        uint8_t opcode;
        uint32_t tail;

        tail = hw->arq.tail;

        /* process all the async queue messages */
        while (tail != hw->arq.head) {
                msg_q = hw->arq.msg_q[hw->arq.head];

                opcode = msg_q[0] & 0xff;
                switch (opcode) {
                case HNS3_MBX_LINK_STAT_CHANGE:
                        memcpy(&mac->link_speed, &msg_q[2],
                               sizeof(mac->link_speed));
                        mac->link_status = rte_le_to_cpu_16(msg_q[1]);
                        mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
                        break;
                case HNS3_MBX_ASSERTING_RESET:
                        /* The PF has asserted a reset, so the VF enters the
                         * pending state and polls the hardware reset status
                         * until the reset completes; after that the stack
                         * is eventually re-initialized.
                         */
                        reset_level = rte_le_to_cpu_16(msg_q[1]);
                        hns3_atomic_set_bit(reset_level, &hw->reset.pending);

                        hns3_warn(hw, "PF inform reset level %d", reset_level);
                        hw->reset.stats.request_cnt++;
                        hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
                        break;
                default:
                        hns3_err(hw, "Fetched unsupported(%d) message from arq",
                                 opcode);
                        break;
                }

                hns3_mbx_head_ptr_move_arq(hw->arq);
                msg_q = hw->arq.msg_q[hw->arq.head];
        }
}

/*
 * Case 1: the response arrives after the timeout. req_msg_data has already
 *         been cleared to 0 and no longer matches resp_msg, so decrement
 *         lost.
 * Case 2: the previous response arrives while a new message is being sent.
 *         req_msg_data differs from resp_msg, so decrement lost and keep
 *         waiting for the new response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t tail = resp->tail + 1;

        if (tail > resp->head)
                tail = resp->head;
        if (resp->req_msg_data != resp_msg) {
                if (resp->lost)
                        resp->lost--;
                hns3_warn(hw, "Received a mismatched response req_msg(%x) "
                          "resp_msg(%x) head(%d) tail(%d) lost(%d)",
                          resp->req_msg_data, resp_msg, resp->head, tail,
                          resp->lost);
        } else if (tail + resp->lost > resp->head) {
                resp->lost--;
                hns3_warn(hw, "Received a new response again resp_msg(%x) "
                          "head(%d) tail(%d) lost(%d)", resp_msg,
                          resp->head, tail, resp->lost);
        }
        rte_io_wmb();
        resp->tail = tail;
}

static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
        switch (link_fail_code) {
        case HNS3_MBX_LF_NORMAL:
                break;
        case HNS3_MBX_LF_REF_CLOCK_LOST:
                hns3_warn(hw, "Reference clock lost!");
                break;
        case HNS3_MBX_LF_XSFP_TX_DISABLE:
                hns3_warn(hw, "SFP tx is disabled!");
                break;
        case HNS3_MBX_LF_XSFP_ABSENT:
                hns3_warn(hw, "SFP is absent!");
                break;
        default:
                hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
                break;
        }
}

static void
hns3_handle_link_change_event(struct hns3_hw *hw,
                              struct hns3_mbx_pf_to_vf_cmd *req)
{
#define LINK_STATUS_OFFSET     1
#define LINK_FAIL_CODE_OFFSET  2

        if (!req->msg[LINK_STATUS_OFFSET])
                hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

        hns3_update_link_status(hw);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET       1
        uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
                HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
        /*
         * Currently, the hns3 network engine's hardware does not support
         * more than two layers of VLAN offload; exceeding that causes
         * packet loss or corrupted packets. If the hns3 PF kernel ethdev
         * driver sets the PVID of a VF device after that device has been
         * initialized, the PF driver notifies the VF driver to update its
         * PVID configuration state. The VF driver updates the state
         * immediately so that VLAN processing in Tx and Rx stays correct,
         * but during the window of this state transition, packet loss or
         * packets with the wrong VLAN may still occur.
         */
        if (hw->port_base_vlan_cfg.state != new_pvid_state) {
                hw->port_base_vlan_cfg.state = new_pvid_state;
                hns3_update_all_queues_pvid_proc_en(hw);
        }
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
        if (!promisc_en) {
                /*
                 * When promisc/allmulti mode is disabled by the hns3 PF
                 * kernel ethdev driver because the VF is untrusted, update
                 * the VF's related status accordingly.
                 */
                hns3_warn(hw, "Promisc mode will be closed by host for being "
                              "untrusted.");
                hw->data->promiscuous = 0;
                hw->data->all_multicast = 0;
        }
}
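
/*
 * Mailbox interrupt handler: drain the command receive queue (CRQ),
 * dispatch PF-to-VF replies to the synchronous response buffer, queue
 * asynchronous events (link change, reset assertion) onto the ARQ, and
 * handle pushed link/VLAN/promiscuous state updates.
 */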
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        uint32_t msg_data;
        uint16_t *msg_q;
        uint8_t opcode;
        uint16_t flag;
        uint8_t *temp;
        int i;

        while (!hns3_cmd_crq_empty(hw)) {
                if (rte_atomic16_read(&hw->reset.disable_cmd))
                        return;

                desc = &crq->desc[crq->next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
                        hns3_warn(hw,
                                  "dropped invalid mailbox message, code = %d",
                                  opcode);

                        /* dropping/not processing this invalid message */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                switch (opcode) {
                case HNS3_MBX_PF_VF_RESP:
                        resp->resp_status = hns3_resp_to_errno(req->msg[3]);

                        temp = (uint8_t *)&req->msg[4];
                        for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
                                resp->additional_info[i] = *temp;
                                temp++;
                        }
                        msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
                        hns3_update_resp_position(hw, msg_data);
                        break;
                case HNS3_MBX_LINK_STAT_CHANGE:
                case HNS3_MBX_ASSERTING_RESET:
                        msg_q = hw->arq.msg_q[hw->arq.tail];
                        memcpy(&msg_q[0], req->msg,
                               HNS3_MBX_MAX_ARQ_MSG_SIZE * sizeof(uint16_t));
                        hns3_mbx_tail_ptr_move_arq(hw->arq);

                        hns3_mbx_handler(hw);
                        break;
                case HNS3_MBX_PUSH_LINK_STATUS:
                        hns3_handle_link_change_event(hw, req);
                        break;
                case HNS3_MBX_PUSH_VLAN_INFO:
                        /*
                         * When the PVID configuration state of a VF device
                         * is changed by the hns3 PF kernel driver, the VF
                         * driver receives this mailbox message from the PF
                         * driver.
                         */
                        hns3_update_port_base_vlan_info(hw, req);
                        break;
                case HNS3_MBX_PUSH_PROMISC_INFO:
                        /*
                         * When the trust status of a VF device is changed
                         * by the hns3 PF kernel driver, the VF driver
                         * receives this mailbox message from the PF driver.
                         */
                        hns3_handle_promisc_info(hw, req->msg[1]);
                        break;
                default:
                        hns3_err(hw,
                                 "VF received unsupported(%d) mbx msg from PF",
                                 opcode);
                        break;
                }

                crq->desc[crq->next_to_use].flag = 0;
                hns3_mbx_ring_ptr_move_crq(crq);
        }

        /* Write back the CMDQ_RQ head pointer; the IMP needs this pointer */
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
}