net/avf: support stats
drivers/net/avf/avf_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "avf_log.h"
#include "base/avf_prototype.h"
#include "base/avf_adminq_cmd.h"
#include "base/avf_type.h"

#include "avf.h"
#include "avf_rxtx.h"

static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int avf_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);

int avf_logtype_init;
int avf_logtype_driver;

static const struct rte_pci_id pci_id_avf_map[] = {
	{ RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops avf_eth_dev_ops = {
	.dev_configure              = avf_dev_configure,
	.dev_start                  = avf_dev_start,
	.dev_stop                   = avf_dev_stop,
	.dev_close                  = avf_dev_close,
	.dev_infos_get              = avf_dev_info_get,
	.dev_supported_ptypes_get   = avf_dev_supported_ptypes_get,
	.link_update                = avf_dev_link_update,
	.stats_get                  = avf_dev_stats_get,
	.rx_queue_start             = avf_dev_rx_queue_start,
	.rx_queue_stop              = avf_dev_rx_queue_stop,
	.tx_queue_start             = avf_dev_tx_queue_start,
	.tx_queue_stop              = avf_dev_tx_queue_stop,
	.rx_queue_setup             = avf_dev_rx_queue_setup,
	.rx_queue_release           = avf_dev_rx_queue_release,
	.tx_queue_setup             = avf_dev_tx_queue_setup,
	.tx_queue_release           = avf_dev_tx_queue_release,
};

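/* Apply the VLAN-strip setting requested in dev_conf, provided the PF
 * advertised the VLAN offload capability for this VF.
 */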
static int
avf_dev_configure(struct rte_eth_dev *dev)
{
	struct avf_adapter *ad =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	/* VLAN stripping setting */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			avf_enable_vlan_strip(ad);
		else
			avf_disable_vlan_strip(ad);
	}
	return 0;
}

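/* Initialize the RSS key and lookup table and push them to the PF over
 * virtchnl. RSS enablement itself is controlled by the PF; the LUT
 * spreads queues round-robin, e.g. with 4 Rx queues it becomes
 * 0,1,2,3,0,1,...
 */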
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));
	}

	/* init RSS LUT table */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}

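/* Validate the packet-length configuration of one Rx queue, decide
 * whether scattered Rx is required and initialize the queue's tail
 * register.
 */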
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= ETHER_MAX_LEN ||
		    max_pkt_len > AVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)AVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < ETHER_MIN_LEN ||
		    max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	AVF_WRITE_FLUSH(hw);

	return 0;
}

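/* Walk all configured Rx queues, initialize each one and select the
 * Rx/Tx burst functions matching the resulting configuration.
 */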
static int
avf_init_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue **rxq =
		(struct avf_rx_queue **)dev->data->rx_queues;
	int i, ret = AVF_SUCCESS;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = avf_init_rxq(dev, rxq[i]);
		if (ret != AVF_SUCCESS)
			break;
	}
	/* set rx/tx function to vector/scatter/single-segment
	 * according to parameters
	 */
	avf_set_rx_function(dev);
	avf_set_tx_function(dev);

	return ret;
}

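/* Start all Rx and Tx queues that are not marked for deferred start. */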
static int
avf_start_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue *rxq;
	struct avf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (avf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (avf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
			return -1;
		}
	}

	return 0;
}

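/* Bring the device up: initialize queues, configure RSS and the
 * interrupt mapping over virtchnl, program MAC addresses and finally
 * enable the queues.
 */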
static int
avf_dev_start(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint16_t interval;
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	/* TODO: Rx interrupt */

	if (avf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do Queue init");
		return -1;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (avf_init_rss(adapter) != 0) {
			PMD_DRV_LOG(ERR, "configure rss failed");
			goto err_rss;
		}
	}

	if (avf_configure_queues(adapter) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	/* Map interrupt for descriptor write-back */
	vf->nb_msix = 1;
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* If WB_ON_ITR is supported, enable it */
		vf->msix_base = AVF_RX_VEC_START;
		AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK |
			      AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
	} else {
		/* If the WB_ON_ITR offload flag is not set, an interrupt
		 * is needed for descriptor write-back.
		 */
		vf->msix_base = AVF_MISC_VEC_ID;

		/* set ITR to max */
		interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      (AVF_ITR_INDEX_DEFAULT <<
			       AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
			      (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
	}
	AVF_WRITE_FLUSH(hw);
	/* map all queues to the same interrupt */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		vf->rxq_map[0] |= 1 << i;
	if (avf_config_irq_map(adapter)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		goto err_queue;
	}

	/* Set all mac addrs */
	avf_add_del_all_mac_addr(adapter, TRUE);

	if (avf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	/* TODO: enable interrupt for RX interrupt */
	return 0;

err_mac:
	avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
	return -1;
}

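/* Stop all queues and remove the MAC addresses that were programmed
 * at start time.
 */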
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1)
		return;

	avf_stop_queues(dev);

	/* TODO: Disable the interrupt for Rx */

	/* TODO: Rx interrupt vector mapping free */

	/* remove all mac addrs */
	avf_add_del_all_mac_addr(adapter, FALSE);
	hw->adapter_stopped = 1;
}

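/* Report device capabilities and default queue configuration, based on
 * the resources the PF assigned to this VF.
 */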
static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};
}

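/* Return the list of packet types the Rx path can recognize. */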
static const uint32_t *
avf_dev_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

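/* .link_update callback. Non-blocking: wait_to_complete is ignored
 * because only the locally cached link state is read.
 */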
int
avf_dev_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	/* Only read the link status info stored in the VF; it is updated
	 * when a LINK_CHANGE event is received from the PF over virtchnl.
	 */
	switch (vf->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
					     ETH_LINK_DOWN;
	/* Autonegotiation is off only when a fixed speed was requested */
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);

	rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
			    *(uint64_t *)&dev->data->dev_link,
			    *(uint64_t *)&new_link);

	return 0;
}

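/* Retrieve the VF statistics from the PF over virtchnl and map them to
 * the generic rte_eth_stats counters.
 */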
static int
avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct virtchnl_eth_stats *pstats = NULL;
	int ret;

	ret = avf_query_stats(adapter, &pstats);
	if (ret == 0) {
		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
						pstats->rx_broadcast;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
						pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
		stats->obytes = pstats->tx_bytes;
		return 0;
	}
	PMD_DRV_LOG(ERR, "Get statistics failed");
	return -EIO;
}

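/* Poll AVFGEN_RSTAT until the PF reports that the VF reset has
 * completed.
 */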
static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
	int i, reset;

	for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
		reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
			AVFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(20);
	}

	if (i >= AVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

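/* Initialize the VF side of the virtchnl session: wait for reset to
 * finish, bring up the admin queue, negotiate the API version and
 * fetch the resources (queues, RSS key/LUT sizes) granted by the PF.
 */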
static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api_version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_resource failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}

/* Enable default admin queue interrupt setting */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);

	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
					    AVFINT_DYN_CTL01_ITR_INDX_MASK);

	AVF_WRITE_FLUSH(hw);
}

static inline void
avf_disable_irq0(struct avf_hw *hw)
{
	/* Disable all interrupt types */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	AVF_WRITE_FLUSH(hw);
}

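/* Admin queue interrupt handler: mask IRQ0, process pending virtchnl
 * messages from the PF, then re-enable IRQ0.
 */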
static void
avf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	avf_disable_irq0(hw);

	avf_handle_virtchnl_msg(dev);

	avf_enable_irq0(hw);
}

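/* Initialize an ethdev instance: hook up the ops and burst functions,
 * fill in PCI/HW info, initialize the VF over virtchnl and set up the
 * admin queue interrupt.
 */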
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avf_recv_pkts;
	eth_dev->tx_pkt_burst = &avf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &avf_prep_pkts;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check if we need a different RX
	 * and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		avf_set_rx_function(eth_dev);
		avf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}

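/* Stop the device, shut down the admin queue and tear down the
 * interrupt callback.
 */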
static void
avf_dev_close(struct rte_eth_dev *dev)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	avf_dev_stop(dev);
	avf_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     avf_dev_interrupt_handler, dev);
	avf_disable_irq0(hw);
}

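/* Release everything allocated in avf_dev_init(); only the primary
 * process may do this.
 */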
static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	if (hw->adapter_stopped == 0)
		avf_dev_close(dev);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	if (vf->rss_lut) {
		rte_free(vf->rss_lut);
		vf->rss_lut = NULL;
	}
	if (vf->rss_key) {
		rte_free(vf->rss_key);
		vf->rss_key = NULL;
	}

	return 0;
}

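/* PCI probe/remove: allocate an ethdev with avf_adapter private data
 * and run avf_dev_init()/avf_dev_uninit() on it.
 */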
static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct avf_adapter), avf_dev_init);
}

static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
	.id_table = pci_id_avf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_avf_pci_probe,
	.remove = eth_avf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");

RTE_INIT(avf_init_log);
static void
avf_init_log(void)
{
	avf_logtype_init = rte_log_register("pmd.avf.init");
	if (avf_logtype_init >= 0)
		rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
	avf_logtype_driver = rte_log_register("pmd.avf.driver");
	if (avf_logtype_driver >= 0)
		rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}

/* Memory functions for the shared base code */
enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
		       struct avf_dma_mem *mem,
		       u64 size,
		       u32 alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return AVF_ERR_PARAM;

	snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
					 alignment, RTE_PGSIZE_2M);
	if (!mz)
		return AVF_ERR_NO_MEMORY;

	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;
	mem->zone = (const void *)mz;
	PMD_DRV_LOG(DEBUG,
		    "memzone %s allocated with physical address: %"PRIu64,
		    mz->name, mem->pa);

	return AVF_SUCCESS;
}

enum avf_status_code
avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
		   struct avf_dma_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	PMD_DRV_LOG(DEBUG,
		    "memzone %s to be freed with physical address: %"PRIu64,
		    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
	rte_memzone_free((const struct rte_memzone *)mem->zone);
	mem->zone = NULL;
	mem->va = NULL;
	mem->pa = (u64)0;

	return AVF_SUCCESS;
}

enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
			struct avf_virt_mem *mem,
			u32 size)
{
	if (!mem)
		return AVF_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("avf", size, 0);

	if (mem->va)
		return AVF_SUCCESS;
	else
		return AVF_ERR_NO_MEMORY;
}

enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
		    struct avf_virt_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return AVF_SUCCESS;
}