drivers/net/hns3/hns3_mbx.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET            2

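/*
 * Map response codes returned by the PF (which mirror Linux errno values)
 * to the negative errno values used by this driver.
 */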
static const struct errno_respcode_map err_code_map[] = {
        {0, 0},
        {1, -EPERM},
        {2, -ENOENT},
        {5, -EIO},
        {11, -EAGAIN},
        {12, -ENOMEM},
        {16, -EBUSY},
        {22, -EINVAL},
        {28, -ENOSPC},
        {95, -EOPNOTSUPP},
};

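/*
 * Convert a response code returned by the PF into a negative errno; codes
 * missing from err_code_map fall back to -EIO.
 */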
static int
hns3_resp_to_errno(uint16_t resp_code)
{
        uint32_t i, num;

        num = RTE_DIM(err_code_map);
        for (i = 0; i < num; i++) {
                if (err_code_map[i].resp_code == resp_code)
                        return err_code_map[i].err_no;
        }

        return -EIO;
}

static void
hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
        if (hw->mbx_resp.matching_scheme ==
            HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) {
                hw->mbx_resp.lost++;
                hns3_err(hw,
                         "VF could not get mbx(%u,%u) head(%u) tail(%u) "
                         "lost(%u) from PF",
                         code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail,
                         hw->mbx_resp.lost);
                return;
        }

        hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode);
}

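/*
 * Wait for the PF's response to the request identified by (code, subcode).
 * The CRQ is polled every HNS3_WAIT_RESP_US until the response arrives or
 * the per-device mailbox time limit expires; the wait is abandoned early if
 * commands are disabled or a reset is pending. On success, up to resp_len
 * bytes of the response payload are copied to resp_data.
 */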
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_WAIT_RESP_US       100
#define US_PER_MS               1000
        uint32_t mbx_time_limit;
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_mbx_resp_status *mbx_resp;
        uint32_t wait_time = 0;
        bool received;

        if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
                hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
                         resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
                return -EINVAL;
        }

        mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
        while (wait_time < mbx_time_limit) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hw->mbx_resp.req_msg_data = 0;
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "reset pending");
                        return -EIO;
                }

                hns3_dev_handle_mbx_msg(hw);
                rte_delay_us(HNS3_WAIT_RESP_US);

                if (hw->mbx_resp.matching_scheme ==
                    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL)
                        received = (hw->mbx_resp.head ==
                                    hw->mbx_resp.tail + hw->mbx_resp.lost);
                else
                        received = hw->mbx_resp.received_match_resp;
                if (received)
                        break;

                wait_time += HNS3_WAIT_RESP_US;
        }
        hw->mbx_resp.req_msg_data = 0;
        if (wait_time >= mbx_time_limit) {
                hns3_mbx_proc_timeout(hw, code, subcode);
                return -ETIME;
        }
        rte_io_rmb();
        mbx_resp = &hw->mbx_resp;

        if (mbx_resp->resp_status)
                return mbx_resp->resp_status;

        if (resp_data)
                memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

        return 0;
}

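/*
 * Record the request (code, subcode) and reset the response bookkeeping of
 * both matching schemes before a synchronous send. Called with
 * hw->mbx_resp.lock held.
 */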
static void
hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
        /*
         * Init both matching scheme fields because we may not know the exact
         * scheme that will be used in the initial phase.
         *
         * It is also OK to init both matching scheme fields even after the
         * exact scheme in use is known.
         */
        hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
        hw->mbx_resp.head++;

        /* Update match_id and ensure the value of match_id is not zero */
        hw->mbx_resp.match_id++;
        if (hw->mbx_resp.match_id == 0)
                hw->mbx_resp.match_id = 1;
        hw->mbx_resp.received_match_resp = false;

        hw->mbx_resp.resp_status = 0;
        memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
}

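/*
 * Send a mailbox message from VF to PF. The first two bytes of the payload
 * carry the code and subcode (ring-vector messages use only one byte for
 * the code). If need_resp is true, the call is synchronous: it blocks under
 * hw->mbx_resp.lock until the PF's response is matched or the wait times
 * out.
 */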
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
                  uint8_t *resp_data, uint16_t resp_len)
{
        struct hns3_mbx_vf_to_pf_cmd *req;
        struct hns3_cmd_desc desc;
        bool is_ring_vector_msg;
        int offset;
        int ret;

        req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

        /* first two bytes are reserved for code & subcode */
        if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
                hns3_err(hw,
                         "VF send mbx msg fail, msg len %u exceeds max payload len %d",
                         msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
                return -EINVAL;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = code;
        is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_GET_RING_VECTOR_MAP);
        if (!is_ring_vector_msg)
                req->msg[1] = subcode;
        if (msg_data) {
                offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
                memcpy(&req->msg[offset], msg_data, msg_len);
        }

        /* synchronous send */
        if (need_resp) {
                req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
                rte_spinlock_lock(&hw->mbx_resp.lock);
                hns3_mbx_prepare_resp(hw, code, subcode);
                req->match_id = hw->mbx_resp.match_id;
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        hw->mbx_resp.head--;
                        rte_spinlock_unlock(&hw->mbx_resp.lock);
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }

                ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
                rte_spinlock_unlock(&hw->mbx_resp.lock);
        } else {
                /* asynchronous send */
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }
        }

        return ret;
}

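/*
 * Usage sketch (illustrative only, not part of the driver): a synchronous
 * request that waits for the PF's reply, assuming the
 * HNS3_MBX_GET_LINK_STATUS opcode from hns3_mbx.h:
 *
 *      uint8_t resp[HNS3_MBX_MAX_RESP_DATA_SIZE];
 *      int ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0,
 *                                  NULL, 0, true, resp, sizeof(resp));
 *      if (ret != 0)
 *              hns3_err(hw, "get link status from PF failed: %d", ret);
 */

/*
 * Check whether the CRQ (the PF-to-VF receive queue) has unconsumed
 * descriptors by comparing the hardware tail register with the driver's
 * next_to_use cursor.
 */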
static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
        uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

        return tail == hw->cmq.crq.next_to_use;
}

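/*
 * Parse a HNS3_MBX_LINK_STAT_CHANGE message: msg_q[1] carries the link
 * status, msg_q[2..3] the link speed, msg_q[4] the duplex mode, and the
 * lowest bit of msg_q[5] whether the PF can push link status change (LSC)
 * events to the VF.
 */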
static void
hns3vf_handle_link_change_event(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
        uint8_t link_status, link_duplex;
        uint16_t *msg_q = req->msg;
        uint8_t support_push_lsc;
        uint32_t link_speed;

        memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
        link_status = rte_le_to_cpu_16(msg_q[1]);
        link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
        hns3vf_update_link_status(hw, link_status, link_speed,
                                  link_duplex);
        support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u;
        hns3vf_update_push_lsc_cap(hw, support_push_lsc);
}

static void
hns3_handle_asserting_reset(struct hns3_hw *hw,
                            struct hns3_mbx_pf_to_vf_cmd *req)
{
        enum hns3_reset_level reset_level;
        uint16_t *msg_q = req->msg;

        /*
         * PF has asserted reset, so the VF should enter the pending state
         * and poll the hardware reset status until the reset completes.
         * After that, the stack should eventually be re-initialized.
         */
        reset_level = rte_le_to_cpu_16(msg_q[1]);
        hns3_atomic_set_bit(reset_level, &hw->reset.pending);

        hns3_warn(hw, "PF inform reset level %d", reset_level);
        hw->reset.stats.request_cnt++;
        hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}

/*
 * Case 1: a response is received after the timeout: req_msg_data is 0 and
 *         does not equal resp_msg, so decrement lost.
 * Case 2: the last response arrives during a new send_mbx_msg: req_msg_data
 *         differs from resp_msg, so decrement lost and continue waiting for
 *         the new response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t tail = resp->tail + 1;

        if (tail > resp->head)
                tail = resp->head;
        if (resp->req_msg_data != resp_msg) {
                if (resp->lost)
                        resp->lost--;
                hns3_warn(hw, "Received a mismatched response req_msg(%x) "
                          "resp_msg(%x) head(%u) tail(%u) lost(%u)",
                          resp->req_msg_data, resp_msg, resp->head, tail,
                          resp->lost);
        } else if (tail + resp->lost > resp->head) {
                resp->lost--;
                hns3_warn(hw, "Received a new response again resp_msg(%x) "
                          "head(%u) tail(%u) lost(%u)", resp_msg,
                          resp->head, tail, resp->lost);
        }
        rte_io_wmb();
        resp->tail = tail;
}

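/*
 * Handle a HNS3_MBX_PF_VF_RESP message: msg[1..2] echo the request's code
 * and subcode, msg[3] carries the response code, and msg[4..] the response
 * payload. A non-zero match_id selects the match-id matching scheme;
 * otherwise the original head/tail/lost scheme is used.
 */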
static void
hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t msg_data;

        if (req->match_id != 0) {
                /*
                 * If match_id is not zero, the PF supports copying the
                 * request's match_id to its response, so the VF can use
                 * match_id to match the request.
                 */
                if (resp->matching_scheme !=
                    HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
                        resp->matching_scheme =
                                HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
                        hns3_info(hw, "detect mailbox support match id!");
                }
                if (req->match_id == resp->match_id) {
                        resp->resp_status = hns3_resp_to_errno(req->msg[3]);
                        memcpy(resp->additional_info, &req->msg[4],
                               HNS3_MBX_MAX_RESP_DATA_SIZE);
                        rte_io_wmb();
                        resp->received_match_resp = true;
                }
                return;
        }

        /*
         * Reaching here means the PF does not support copying the request's
         * match_id to its response, so the VF follows the original scheme.
         */
        resp->resp_status = hns3_resp_to_errno(req->msg[3]);
        memcpy(resp->additional_info, &req->msg[4],
               HNS3_MBX_MAX_RESP_DATA_SIZE);
        msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
        hns3_update_resp_position(hw, msg_data);
}

static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
        switch (link_fail_code) {
        case HNS3_MBX_LF_NORMAL:
                break;
        case HNS3_MBX_LF_REF_CLOCK_LOST:
                hns3_warn(hw, "Reference clock lost!");
                break;
        case HNS3_MBX_LF_XSFP_TX_DISABLE:
                hns3_warn(hw, "SFP tx is disabled!");
                break;
        case HNS3_MBX_LF_XSFP_ABSENT:
                hns3_warn(hw, "SFP is absent!");
                break;
        default:
                hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
                break;
        }
}

static void
hns3pf_handle_link_change_event(struct hns3_hw *hw,
                                struct hns3_mbx_vf_to_pf_cmd *req)
{
#define LINK_STATUS_OFFSET     1
#define LINK_FAIL_CODE_OFFSET  2

        if (!req->msg[LINK_STATUS_OFFSET])
                hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

        hns3_update_linkstatus_and_event(hw, true);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET       1
        uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
                HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
        /*
         * Currently, the hns3 network engine hardware doesn't support more
         * than two layers of VLAN offload, which could cause loss of such
         * packets or packets with a wrong VLAN tag. If the hns3 PF kernel
         * ethdev driver sets the PVID for a VF device after that VF device
         * has been initialized, the PF driver notifies the VF driver to
         * update the PVID configuration state. The VF driver updates the
         * state immediately to ensure that VLAN processing in Tx and Rx is
         * correct. But during the window of this state transition, packet
         * loss or packets with a wrong VLAN tag may still occur.
         */
        if (hw->port_base_vlan_cfg.state != new_pvid_state) {
                hw->port_base_vlan_cfg.state = new_pvid_state;
                hns3_update_all_queues_pvid_proc_en(hw);
        }
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
        if (!promisc_en) {
                /*
                 * When promisc/allmulti mode is closed by the hns3 PF kernel
                 * ethdev driver for an untrusted VF, update the VF's related
                 * status.
                 */
                hns3_warn(hw, "Promisc mode will be closed by host for being "
                              "untrusted.");
                hw->data->promiscuous = 0;
                hw->data->all_multicast = 0;
        }
}

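/*
 * Scan the CRQ on behalf of threads other than the primary process's intr
 * thread. Only HNS3_MBX_PF_VF_RESP messages are consumed here; their opcode
 * is cleared so the intr thread skips them later. crq->next_to_use is left
 * untouched so that the remaining messages stay visible to the intr thread.
 */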
static void
hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        uint32_t tail, next_to_use;
        uint8_t opcode;
        uint16_t flag;

        tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
        next_to_use = crq->next_to_use;
        while (next_to_use != tail) {
                desc = &crq->desc[next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
                if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
                        goto scan_next;

                if (crq->desc[next_to_use].opcode == 0)
                        goto scan_next;

                if (opcode == HNS3_MBX_PF_VF_RESP) {
                        hns3_handle_mbx_response(hw, req);
                        /*
                         * Clear the opcode of the current descriptor so the
                         * intr thread doesn't process it again.
                         */
                        crq->desc[next_to_use].opcode = 0;
                }

scan_next:
                next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
        }

        /*
         * Note: crq->next_to_use must not be updated here, otherwise the
         * messages left for the intr thread would be discarded.
         */
}

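/*
 * Process all pending mailbox messages. For a VF, threads other than the
 * primary process's intr thread are diverted to
 * hns3_handle_mbx_msg_out_intr(); only the intr thread retires CRQ
 * descriptors and handles event messages such as link status changes.
 */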
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        bool handle_out;
        uint8_t opcode;
        uint16_t flag;

        rte_spinlock_lock(&hw->cmq.crq.lock);

        handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY ||
                      !rte_thread_is_intr()) && hns->is_vf;
        if (handle_out) {
                /*
                 * Currently, any thread in the primary and secondary
                 * processes could send a mailbox sync request, so each of
                 * them has to process the crq message (which is the
                 * HNS3_MBX_PF_VF_RESP) in its own thread context. It may
                 * also process other messages because it uses the policy of
                 * processing all pending messages at once.
                 * But some messages such as HNS3_MBX_PUSH_LINK_STATUS can
                 * only be processed within the intr thread of the primary
                 * process; otherwise an lsc event may be reported in a
                 * secondary process.
                 * So threads other than the primary process's intr thread
                 * only process the HNS3_MBX_PF_VF_RESP message; once such a
                 * message is processed, its opcode is rewritten to zero so
                 * that the intr thread in the primary process will not
                 * process it again.
                 */
                hns3_handle_mbx_msg_out_intr(hw);
                rte_spinlock_unlock(&hw->cmq.crq.lock);
                return;
        }

        while (!hns3_cmd_crq_empty(hw)) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        rte_spinlock_unlock(&hw->cmq.crq.lock);
                        return;
                }

                desc = &crq->desc[crq->next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
                        hns3_warn(hw,
                                  "dropped invalid mailbox message, code = %u",
                                  opcode);

                        /* dropping/not processing this invalid message */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                handle_out = hns->is_vf && desc->opcode == 0;
                if (handle_out) {
                        /* Message already processed by another thread */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                switch (opcode) {
                case HNS3_MBX_PF_VF_RESP:
                        hns3_handle_mbx_response(hw, req);
                        break;
                case HNS3_MBX_LINK_STAT_CHANGE:
                        hns3vf_handle_link_change_event(hw, req);
                        break;
                case HNS3_MBX_ASSERTING_RESET:
                        hns3_handle_asserting_reset(hw, req);
                        break;
                case HNS3_MBX_PUSH_LINK_STATUS:
                        /*
                         * This message is reported by the firmware and is
                         * carried in 'struct hns3_mbx_vf_to_pf_cmd' format.
                         * Therefore, we should cast the req variable to
                         * 'struct hns3_mbx_vf_to_pf_cmd' before processing.
                         */
                        hns3pf_handle_link_change_event(hw,
                                (struct hns3_mbx_vf_to_pf_cmd *)req);
                        break;
                case HNS3_MBX_PUSH_VLAN_INFO:
                        /*
                         * When the PVID configuration state of a VF device
                         * is changed by the hns3 PF kernel driver, the VF
                         * driver receives this mailbox message from the PF
                         * driver.
                         */
                        hns3_update_port_base_vlan_info(hw, req);
                        break;
                case HNS3_MBX_PUSH_PROMISC_INFO:
                        /*
                         * When the trust status of a VF device is changed by
                         * the hns3 PF kernel driver, the VF driver receives
                         * this mailbox message from the PF driver.
                         */
                        hns3_handle_promisc_info(hw, req->msg[1]);
                        break;
                default:
                        hns3_err(hw, "received unsupported(%u) mbx msg",
                                 opcode);
                        break;
                }

                crq->desc[crq->next_to_use].flag = 0;
                hns3_mbx_ring_ptr_move_crq(crq);
        }

        /* Write back the CMDQ_RQ head pointer; the IMP firmware needs it */
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);

        rte_spinlock_unlock(&hw->cmq.crq.lock);
}