net/ice: handle AdminQ command by DCF
[dpdk.git] drivers/net/ice/ice_dcf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "ice_dcf.h"

#define ICE_DCF_AQ_LEN     32
#define ICE_DCF_AQ_BUF_SZ  4096

#define ICE_DCF_ARQ_MAX_RETRIES 200
#define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */

#define ICE_DCF_VF_RES_BUF_SZ	\
	(sizeof(struct virtchnl_vf_resource) +	\
		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))

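/*
 * Send a virtchnl request to the PF over the mailbox without relying on
 * the interrupt path; used during probe, before IRQ0 is enabled.
 */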
static __rte_always_inline int
ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *req_msg, uint16_t req_msglen)
{
	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
				      req_msg, req_msglen, NULL);
}

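/*
 * Poll the admin receive queue for the response to @op, retrying up to
 * ICE_DCF_ARQ_MAX_RETRIES times with ICE_DCF_ARQ_CHECK_TIME ms between
 * polls. Returns the virtchnl status carried in cookie_low on success,
 * or -EIO if no matching response arrives in time.
 */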
static int
ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
			    uint16_t *rsp_msglen)
{
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	int i = 0;
	int err;

	event.buf_len = rsp_buflen;
	event.msg_buf = rsp_msgbuf;

	do {
		err = iavf_clean_arq_element(&hw->avf, &event, NULL);
		if (err != IAVF_SUCCESS)
			goto again;

		v_op = rte_le_to_cpu_32(event.desc.cookie_high);
		if (v_op != op)
			goto again;

		if (rsp_msglen != NULL)
			*rsp_msglen = event.msg_len;
		return rte_le_to_cpu_32(event.desc.cookie_low);

again:
		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	return -EIO;
}

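/* Remove a completed or failed command from the pending-command queue. */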
static __rte_always_inline void
ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

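/*
 * Mark a command as pending and queue it so that the interrupt handler
 * can match the PF response against it.
 */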
static __rte_always_inline void
ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	cmd->v_ret = IAVF_ERR_NOT_READY;
	cmd->rsp_msglen = 0;
	cmd->pending = 1;

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

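/* Hand the queued request off to the PF through the admin send queue. */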
static __rte_always_inline int
ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	return iavf_aq_send_msg_to_pf(&hw->avf,
				      cmd->v_op, IAVF_SUCCESS,
				      cmd->req_msg, cmd->req_msglen, NULL);
}

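/*
 * Match an incoming admin queue event against the pending commands and,
 * on a hit, copy the response payload and clear the pending flag.
 * VIRTCHNL_OP_EVENT messages and unknown AQ opcodes are ignored here.
 */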
static __rte_always_inline void
ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
{
	struct dcf_virtchnl_cmd *cmd;
	enum virtchnl_ops v_op;
	enum iavf_status v_ret;
	uint16_t aq_op;

	aq_op = rte_le_to_cpu_16(info->desc.opcode);
	if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
		PMD_DRV_LOG(ERR,
			    "Request %u is not supported yet", aq_op);
		return;
	}

	v_op = rte_le_to_cpu_32(info->desc.cookie_high);
	if (unlikely(v_op == VIRTCHNL_OP_EVENT))
		return;

	v_ret = rte_le_to_cpu_32(info->desc.cookie_low);

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
		if (cmd->v_op == v_op && cmd->pending) {
			cmd->v_ret = v_ret;
			cmd->rsp_msglen = RTE_MIN(info->msg_len,
						  cmd->rsp_buflen);
			if (likely(cmd->rsp_msglen != 0))
				rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
					   cmd->rsp_msglen);

			/* prevent compiler reordering */
			rte_compiler_barrier();
			cmd->pending = 0;
			break;
		}
	}

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}

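/* Drain all pending admin queue events and dispatch each one. */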
static void
ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
{
	struct iavf_arq_event_info info;
	uint16_t pending = 1;
	int ret;

	info.buf_len = ICE_DCF_AQ_BUF_SZ;
	info.msg_buf = hw->arq_buf;

	while (pending) {
		ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
		if (ret != IAVF_SUCCESS)
			break;

		ice_dcf_aq_cmd_handle(hw, &info);
	}
}

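/*
 * Negotiate the virtchnl API version with the PF and verify that it
 * falls within the range this driver supports.
 */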
static int
ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
{
#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START	1
#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START	1
	struct virtchnl_version_info version, *pver;
	int err;

	version.major = VIRTCHNL_VERSION_MAJOR;
	version.minor = VIRTCHNL_VERSION_MINOR;
	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)&version, sizeof(version));
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
		return err;
	}

	pver = &hw->virtchnl_version;
	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
					  (uint8_t *)pver, sizeof(*pver), NULL);
	if (err) {
		PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
		return -1;
	}

	PMD_INIT_LOG(DEBUG,
		     "Peer PF API version: %u.%u", pver->major, pver->minor);

	if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
	    (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
	     pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
		PMD_INIT_LOG(ERR,
			     "VIRTCHNL API version should not be lower than (%u.%u)",
			     ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
			     ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
		return -1;
	} else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
		   (pver->major == VIRTCHNL_VERSION_MAJOR &&
		    pver->minor > VIRTCHNL_VERSION_MINOR)) {
		PMD_INIT_LOG(ERR,
			     "PF/VF API version mismatch: (%u.%u)-(%u.%u)",
			     pver->major, pver->minor,
			     VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
		return -1;
	}

	PMD_INIT_LOG(DEBUG, "Peer is a supported PF host");

	return 0;
}

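/*
 * Request the VF resources (with the DCF capability flag set), parse the
 * returned hardware configuration and locate the LAN VSI assigned to
 * this function.
 */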
static int
ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
{
	uint32_t caps;
	int err, i;

	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
	       VF_BASE_MODE_OFFLOADS;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)&caps, sizeof(caps));
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)hw->vf_res,
					  ICE_DCF_VF_RES_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
		return -1;
	}

	iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);

	hw->vsi_res = NULL;
	for (i = 0; i < hw->vf_res->num_vsis; i++) {
		if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			hw->vsi_res = &hw->vf_res->vsi_res[i];
	}

	if (!hw->vsi_res) {
		PMD_DRV_LOG(ERR, "no LAN VSI found");
		return -1;
	}

	hw->vsi_id = hw->vsi_res->vsi_id;
	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);

	return 0;
}

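/* Ask the PF to turn DCF mode off; called on device uninit. */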
static int
ice_dcf_mode_disable(struct ice_dcf_hw *hw)
{
	int err;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  NULL, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR,
			    "Failed to get response of OP_DCF_DISABLE %d",
			    err);
		return -1;
	}

	return 0;
}

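/*
 * Poll VFGEN_RSTAT until the VF leaves reset (VFACTIVE or COMPLETED),
 * waiting up to ICE_DCF_RESET_WAIT_CNT * 20 ms.
 */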
static int
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
{
#define ICE_DCF_RESET_WAIT_CNT       50
	struct iavf_hw *avf = &hw->avf;
	int i, reset;

	for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
		reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
					IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;

		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;

		rte_delay_ms(20);
	}

	if (i >= ICE_DCF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

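/* Unmask and enable the admin queue (misc) interrupt, IRQ0. */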
static inline void
ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

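/* Mask IRQ0 and all other interrupt causes on this VF. */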
static inline void
ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Disable all interrupt types */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	IAVF_WRITE_FLUSH(avf);
}

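/*
 * IRQ0 callback: mask the interrupt, process any virtchnl messages that
 * arrived on the admin queue, then re-enable the interrupt.
 */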
static void
ice_dcf_dev_interrupt_handler(void *param)
{
	struct ice_dcf_hw *hw = param;

	ice_dcf_disable_irq0(hw);

	ice_dcf_handle_virtchnl_msg(hw);

	ice_dcf_enable_irq0(hw);
}

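/*
 * Send a single virtchnl command and busy-wait (with ms sleeps) for the
 * interrupt handler to complete it. The request/response buffers must be
 * consistent: a non-NULL pointer requires a non-zero length, and vice
 * versa. Serialized against other senders by vc_cmd_send_lock.
 */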
int
ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
			     struct dcf_virtchnl_cmd *cmd)
{
	int i = 0;
	int err;

	if ((cmd->req_msg && !cmd->req_msglen) ||
	    (!cmd->req_msg && cmd->req_msglen) ||
	    (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
	    (!cmd->rsp_msgbuf && cmd->rsp_buflen))
		return -EINVAL;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, cmd);

	err = ice_dcf_vc_cmd_send(hw, cmd);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send cmd %d", cmd->v_op);
		goto ret;
	}

	do {
		if (!cmd->pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (cmd->v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (%d) for cmd %d",
			    i, cmd->v_ret, cmd->v_op);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}

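/*
 * Forward an AdminQ command from the DCF to the PF: the descriptor goes
 * out as VIRTCHNL_OP_DCF_CMD_DESC and, if an indirect buffer is attached,
 * the buffer follows as VIRTCHNL_OP_DCF_CMD_BUFF. Both responses are
 * awaited before the descriptor/buffer are handed back to the caller.
 */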
int
ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
		    void *buf, uint16_t buf_size)
{
	struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
	struct ice_dcf_hw *hw = dcf_hw;
	int err = 0;
	int i = 0;

	if ((buf && !buf_size) || (!buf && buf_size) ||
	    buf_size > ICE_DCF_AQ_BUF_SZ)
		return -EINVAL;

	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
	desc_cmd.req_msglen = sizeof(*desc);
	desc_cmd.req_msg = (uint8_t *)desc;
	desc_cmd.rsp_buflen = sizeof(*desc);
	desc_cmd.rsp_msgbuf = (uint8_t *)desc;

	if (buf == NULL)
		return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);

	desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);

	buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
	buff_cmd.req_msglen = buf_size;
	buff_cmd.req_msg = buf;
	buff_cmd.rsp_buflen = buf_size;
	buff_cmd.rsp_msgbuf = buf;

	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, &desc_cmd);
	ice_dcf_vc_cmd_set(hw, &buff_cmd);

	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
		err = -1;
		PMD_DRV_LOG(ERR, "Failed to send OP_DCF_CMD_DESC/BUFF");
		goto ret;
	}

	do {
		if ((!desc_cmd.pending && !buff_cmd.pending) ||
		    (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
		    (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (desc: %d / buff: %d)",
			    i, desc_cmd.v_ret, buff_cmd.v_ret);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, &desc_cmd);
	ice_dcf_aq_cmd_clear(hw, &buff_cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);

	return err;
}

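/*
 * Bring up the DCF control path: map BAR0 registers into the embedded
 * iavf_hw, initialize the admin queue, check the virtchnl API version,
 * fetch the VF resources, and finally register and enable the IRQ0
 * handler.
 */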
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
	hw->avf.back = hw;

	hw->avf.bus.bus_id = pci_dev->addr.bus;
	hw->avf.bus.device = pci_dev->addr.devid;
	hw->avf.bus.func = pci_dev->addr.function;

	hw->avf.device_id = pci_dev->id.device_id;
	hw->avf.vendor_id = pci_dev->id.vendor_id;
	hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
	hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;

	rte_spinlock_init(&hw->vc_cmd_send_lock);
	rte_spinlock_init(&hw->vc_cmd_queue_lock);
	TAILQ_INIT(&hw->vc_cmd_queue);

	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
	if (hw->arq_buf == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
		goto err;
	}

	ret = iavf_set_mac_type(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
		goto err;
	}

	ret = ice_dcf_check_reset_done(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	ret = iavf_init_adminq(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
		goto err;
	}

	if (ice_dcf_init_check_api_version(hw)) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
	if (hw->vf_res == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (ice_dcf_get_vf_resource(hw)) {
		PMD_INIT_LOG(ERR, "Failed to get VF resource");
		goto err_alloc;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   ice_dcf_dev_interrupt_handler, hw);
	rte_intr_enable(&pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	return 0;

err_alloc:
	rte_free(hw->vf_res);
err_api:
	iavf_shutdown_adminq(&hw->avf);
err:
	rte_free(hw->arq_buf);

	return -1;
}

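/*
 * Tear down the DCF control path in reverse order of ice_dcf_init_hw():
 * disable IRQ0, tell the PF to leave DCF mode, shut the admin queue down
 * and free the buffers.
 */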
void
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dcf_disable_irq0(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ice_dcf_dev_interrupt_handler, hw);

	ice_dcf_mode_disable(hw);
	iavf_shutdown_adminq(&hw->avf);

	rte_free(hw->arq_buf);
	rte_free(hw->vf_res);
}