net/avf: enable link status update
[dpdk.git] / drivers/net/avf/avf_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "avf_log.h"
#include "base/avf_prototype.h"
#include "base/avf_adminq_cmd.h"
#include "base/avf_type.h"

#include "avf.h"
#include "avf_rxtx.h"

static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);

int avf_logtype_init;
int avf_logtype_driver;
static const struct rte_pci_id pci_id_avf_map[] = {
	{ RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops avf_eth_dev_ops = {
	.dev_configure              = avf_dev_configure,
	.dev_start                  = avf_dev_start,
	.dev_stop                   = avf_dev_stop,
	.dev_close                  = avf_dev_close,
	.dev_infos_get              = avf_dev_info_get,
	.dev_supported_ptypes_get   = avf_dev_supported_ptypes_get,
	.link_update                = avf_dev_link_update,
	.rx_queue_start             = avf_dev_rx_queue_start,
	.rx_queue_stop              = avf_dev_rx_queue_stop,
	.tx_queue_start             = avf_dev_tx_queue_start,
	.tx_queue_stop              = avf_dev_tx_queue_stop,
	.rx_queue_setup             = avf_dev_rx_queue_setup,
	.rx_queue_release           = avf_dev_rx_queue_release,
	.tx_queue_setup             = avf_dev_tx_queue_setup,
	.tx_queue_release           = avf_dev_tx_queue_release,
};

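/* Apply the port configuration. For now this only propagates the VLAN
 * stripping request to the PF, and only when the VF reports the VLAN
 * offload capability; everything else is handled at queue setup time.
 */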
static int
avf_dev_configure(struct rte_eth_dev *dev)
{
	struct avf_adapter *ad =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	/* VLAN stripping setting */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			avf_enable_vlan_strip(ad);
		else
			avf_disable_vlan_strip(ad);
	}
	return 0;
}

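/* Initialize RSS from the port configuration: program the hash key
 * supplied by the application (or a random one if none was given),
 * spread the lookup table across the configured Rx queues, and push
 * both to the PF over virtchnl.
 */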
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to the default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by the PF driver. It cannot be
	 * enabled or disabled here based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate a default hash key */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));
	}

	/* init RSS LUT table, spreading entries across the Rx queues */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}

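/* Per-queue Rx init: validate max_rx_pkt_len against the jumbo-frame
 * setting, decide whether scattered Rx is needed for the mbuf buffer
 * size, and arm the queue by writing its tail register.
 */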
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= ETHER_MAX_LEN ||
		    max_pkt_len > AVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)AVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < ETHER_MIN_LEN ||
		    max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	AVF_WRITE_FLUSH(hw);

	return 0;
}

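/* Initialize every Rx queue that has been set up, then select the
 * Rx/Tx burst functions (vector, scattered or single-segment) that
 * match the resulting configuration.
 */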
static int
avf_init_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue **rxq =
		(struct avf_rx_queue **)dev->data->rx_queues;
	int i, ret = AVF_SUCCESS;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = avf_init_rxq(dev, rxq[i]);
		if (ret != AVF_SUCCESS)
			break;
	}
	/* set rx/tx function to vector/scatter/single-segment
	 * according to parameters
	 */
	avf_set_rx_function(dev);
	avf_set_tx_function(dev);

	return ret;
}

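/* Start all Rx and Tx queues that are not marked for deferred start;
 * returns -1 on the first queue that fails to start.
 */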
static int
avf_start_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue *rxq;
	struct avf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (avf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (avf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
			return -1;
		}
	}

	return 0;
}

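/* dev_start: initialize the queues and RSS, set up descriptor
 * write-back (WB_ON_ITR when the PF offers it, otherwise the misc
 * interrupt vector with ITR set to maximum), map all Rx queues to a
 * single vector, program the MAC addresses and enable the queues.
 */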
static int
avf_dev_start(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t interval;
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	/* TODO: Rx interrupt */

	if (avf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do Queue init");
		return -1;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (avf_init_rss(adapter) != 0) {
			PMD_DRV_LOG(ERR, "configure rss failed");
			goto err_rss;
		}
	}

	if (avf_configure_queues(adapter) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	/* Map interrupt for writeback */
	vf->nb_msix = 1;
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* If WB_ON_ITR is supported, enable it */
		vf->msix_base = AVF_RX_VEC_START;
		AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK |
			      AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
	} else {
		/* If the WB_ON_ITR offload flag is not set, an interrupt
		 * is needed for descriptor write-back.
		 */
		vf->msix_base = AVF_MISC_VEC_ID;

		/* set ITR to max */
		interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      (AVF_ITR_INDEX_DEFAULT <<
			       AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
			      (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
	}
	AVF_WRITE_FLUSH(hw);
	/* map all queues to the same interrupt vector */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		vf->rxq_map[0] |= 1 << i;
	if (avf_config_irq_map(adapter)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		goto err_queue;
	}

	/* Set all MAC addresses */
	avf_add_del_all_mac_addr(adapter, TRUE);

	if (avf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	/* TODO: enable interrupt for RX interrupt */
	return 0;

err_mac:
	avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
	return -1;
}

static void
avf_dev_stop(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1)
		return;

	avf_stop_queues(dev);

	/* TODO: Disable the interrupt for Rx */

	/* TODO: Rx interrupt vector mapping free */

	/* remove all MAC addresses */
	avf_add_del_all_mac_addr(adapter, FALSE);
	hw->adapter_stopped = 1;
}

static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};
}

static const uint32_t *
avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

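/* Report the cached link status. No hardware access happens here: the
 * speed and link state are refreshed asynchronously when a LINK_CHANGE
 * event arrives from the PF, so wait_to_complete is ignored.
 */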
int
avf_dev_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(&new_link, 0, sizeof(new_link));

	/* Only read the link status cached in the VF; it is updated when a
	 * LINK_CHANGE event is received from the PF over virtchnl.
	 */
	switch (vf->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
					     ETH_LINK_DOWN;
	/* Autonegotiation is reported unless a fixed speed was requested */
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);

	/* The 8-byte-aligned link struct is swapped in one compare-and-set */
	rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
			    *(uint64_t *)&dev->data->dev_link,
			    *(uint64_t *)&new_link);

	return 0;
}

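/* Poll AVFGEN_RSTAT until the PF reports the VF reset as finished
 * (VFACTIVE or COMPLETED), waiting up to AVF_RESET_WAIT_CNT * 20 ms.
 */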
static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
	int i, reset;

	for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
		reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
			AVFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(20);
	}

	if (i >= AVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

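/* Bring up the VF side of the driver: wait for a pending VF reset to
 * finish, initialize the admin queue, negotiate the virtchnl API
 * version, fetch the VF resources from the PF, and allocate the RSS
 * key/LUT buffers sized from those resources.
 */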
static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api_version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_resource failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}

/* Enable default admin queue interrupt setting */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);

	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
					    AVFINT_DYN_CTL01_ITR_INDX_MASK);

	AVF_WRITE_FLUSH(hw);
}

static inline void
avf_disable_irq0(struct avf_hw *hw)
{
	/* Disable all interrupt types */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	AVF_WRITE_FLUSH(hw);
}

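/* Handler for the misc (admin queue) interrupt. irq0 is masked while
 * pending virtchnl messages, including LINK_CHANGE events, are
 * processed, and re-enabled afterwards.
 */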
static void
avf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	avf_disable_irq0(hw);

	avf_handle_virtchnl_msg(dev);

	avf_enable_irq0(hw);
}

static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avf_recv_pkts;
	eth_dev->tx_pkt_burst = &avf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &avf_prep_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check if we need a
	 * different Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		avf_set_rx_function(eth_dev);
		avf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy MAC address */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by the host, generate a
	 * random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register the interrupt callback with the EAL */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}

static void
avf_dev_close(struct rte_eth_dev *dev)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	avf_dev_stop(dev);
	avf_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     avf_dev_interrupt_handler, dev);
	avf_disable_irq0(hw);
}

static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	if (hw->adapter_stopped == 0)
		avf_dev_close(dev);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	if (vf->rss_lut) {
		rte_free(vf->rss_lut);
		vf->rss_lut = NULL;
	}
	if (vf->rss_key) {
		rte_free(vf->rss_key);
		vf->rss_key = NULL;
	}

	return 0;
}

static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct avf_adapter), avf_dev_init);
}

static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
	.id_table = pci_id_avf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_avf_pci_probe,
	.remove = eth_avf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
RTE_INIT(avf_init_log);
static void
avf_init_log(void)
{
	avf_logtype_init = rte_log_register("pmd.avf.init");
	if (avf_logtype_init >= 0)
		rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
	avf_logtype_driver = rte_log_register("pmd.avf.driver");
	if (avf_logtype_driver >= 0)
		rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}

/* Memory allocation functions used by the shared base code */
enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
		       struct avf_dma_mem *mem,
		       u64 size,
		       u32 alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return AVF_ERR_PARAM;

	snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
					 alignment, RTE_PGSIZE_2M);
	if (!mz)
		return AVF_ERR_NO_MEMORY;

	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;
	mem->zone = (const void *)mz;
	PMD_DRV_LOG(DEBUG,
		    "memzone %s allocated with physical address: %"PRIu64,
		    mz->name, mem->pa);

	return AVF_SUCCESS;
}

enum avf_status_code
avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
		   struct avf_dma_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	PMD_DRV_LOG(DEBUG,
		    "memzone %s to be freed with physical address: %"PRIu64,
		    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
	rte_memzone_free((const struct rte_memzone *)mem->zone);
	mem->zone = NULL;
	mem->va = NULL;
	mem->pa = (u64)0;

	return AVF_SUCCESS;
}

enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
			struct avf_virt_mem *mem,
			u32 size)
{
	if (!mem)
		return AVF_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("avf", size, 0);

	if (mem->va)
		return AVF_SUCCESS;
	else
		return AVF_ERR_NO_MEMORY;
}

enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
		    struct avf_virt_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return AVF_SUCCESS;
}