net/avf: enable queue and device
[dpdk.git] / drivers / net / avf / avf_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
18 #include <rte_pci.h>
19 #include <rte_atomic.h>
20 #include <rte_eal.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev.h>
23 #include <rte_ethdev_pci.h>
24 #include <rte_malloc.h>
25 #include <rte_memzone.h>
26 #include <rte_dev.h>
27
28 #include "avf_log.h"
29 #include "base/avf_prototype.h"
30 #include "base/avf_adminq_cmd.h"
31 #include "base/avf_type.h"
32
33 #include "avf.h"
34 #include "avf_rxtx.h"
35
/* Forward declarations for the eth_dev_ops table below. */
static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);

/* Log type IDs registered in avf_init_log(); used by PMD_INIT_LOG/PMD_DRV_LOG. */
int avf_logtype_init;
int avf_logtype_driver;

/* PCI IDs this driver binds to; NULL-terminated for the PCI bus scan. */
static const struct rte_pci_id pci_id_avf_map[] = {
	{ RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
49
/* ethdev ops implemented by this PMD; unset members fall back to
 * the rte_ethdev defaults (typically -ENOTSUP).
 */
static const struct eth_dev_ops avf_eth_dev_ops = {
	.dev_configure              = avf_dev_configure,
	.dev_start                  = avf_dev_start,
	.dev_stop                   = avf_dev_stop,
	.dev_close                  = avf_dev_close,
	.dev_infos_get              = avf_dev_info_get,
	.rx_queue_start             = avf_dev_rx_queue_start,
	.rx_queue_stop              = avf_dev_rx_queue_stop,
	.tx_queue_start             = avf_dev_tx_queue_start,
	.tx_queue_stop              = avf_dev_tx_queue_stop,
	.rx_queue_setup             = avf_dev_rx_queue_setup,
	.rx_queue_release           = avf_dev_rx_queue_release,
	.tx_queue_setup             = avf_dev_tx_queue_setup,
	.tx_queue_release           = avf_dev_tx_queue_release,
};
65
66 static int
67 avf_dev_configure(struct rte_eth_dev *dev)
68 {
69         struct avf_adapter *ad =
70                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
71         struct avf_info *vf =  AVF_DEV_PRIVATE_TO_VF(ad);
72         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
73
74         /* Vlan stripping setting */
75         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
76                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
77                         avf_enable_vlan_strip(ad);
78                 else
79                         avf_disable_vlan_strip(ad);
80         }
81         return 0;
82 }
83
84 static int
85 avf_init_rss(struct avf_adapter *adapter)
86 {
87         struct avf_info *vf =  AVF_DEV_PRIVATE_TO_VF(adapter);
88         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
89         struct rte_eth_rss_conf *rss_conf;
90         uint8_t i, j, nb_q;
91         int ret;
92
93         rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
94         nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
95                        AVF_MAX_NUM_QUEUES);
96
97         if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
98                 PMD_DRV_LOG(DEBUG, "RSS is not supported");
99                 return -ENOTSUP;
100         }
101         if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
102                 PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
103                 /* set all lut items to default queue */
104                 for (i = 0; i < vf->vf_res->rss_lut_size; i++)
105                         vf->rss_lut[i] = 0;
106                 ret = avf_configure_rss_lut(adapter);
107                 return ret;
108         }
109
110         /* In AVF, RSS enablement is set by PF driver. It is not supported
111          * to set based on rss_conf->rss_hf.
112          */
113
114         /* configure RSS key */
115         if (!rss_conf->rss_key) {
116                 /* Calculate the default hash key */
117                 for (i = 0; i <= vf->vf_res->rss_key_size; i++)
118                         vf->rss_key[i] = (uint8_t)rte_rand();
119         } else
120                 rte_memcpy(vf->rss_key, rss_conf->rss_key,
121                            RTE_MIN(rss_conf->rss_key_len,
122                                    vf->vf_res->rss_key_size));
123
124         /* init RSS LUT table */
125         for (i = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
126                 if (j >= nb_q)
127                         j = 0;
128                 vf->rss_lut[i] = j;
129         }
130         /* send virtchnnl ops to configure rss*/
131         ret = avf_configure_rss_lut(adapter);
132         if (ret)
133                 return ret;
134         ret = avf_configure_rss_key(adapter);
135         if (ret)
136                 return ret;
137
138         return 0;
139 }
140
141 static int
142 avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
143 {
144         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
145         struct rte_eth_dev_data *dev_data = dev->data;
146         uint16_t buf_size, max_pkt_len, len;
147
148         buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
149
150         /* Calculate the maximum packet length allowed */
151         len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
152         max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
153
154         /* Check if the jumbo frame and maximum packet length are set
155          * correctly.
156          */
157         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
158                 if (max_pkt_len <= ETHER_MAX_LEN ||
159                     max_pkt_len > AVF_FRAME_SIZE_MAX) {
160                         PMD_DRV_LOG(ERR, "maximum packet length must be "
161                                     "larger than %u and smaller than %u, "
162                                     "as jumbo frame is enabled",
163                                     (uint32_t)ETHER_MAX_LEN,
164                                     (uint32_t)AVF_FRAME_SIZE_MAX);
165                         return -EINVAL;
166                 }
167         } else {
168                 if (max_pkt_len < ETHER_MIN_LEN ||
169                     max_pkt_len > ETHER_MAX_LEN) {
170                         PMD_DRV_LOG(ERR, "maximum packet length must be "
171                                     "larger than %u and smaller than %u, "
172                                     "as jumbo frame is disabled",
173                                     (uint32_t)ETHER_MIN_LEN,
174                                     (uint32_t)ETHER_MAX_LEN);
175                         return -EINVAL;
176                 }
177         }
178
179         rxq->max_pkt_len = max_pkt_len;
180         if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
181             (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
182                 dev_data->scattered_rx = 1;
183         }
184         AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
185         AVF_WRITE_FLUSH(hw);
186
187         return 0;
188 }
189
190 static int
191 avf_init_queues(struct rte_eth_dev *dev)
192 {
193         struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
194         struct avf_rx_queue **rxq =
195                 (struct avf_rx_queue **)dev->data->rx_queues;
196         struct avf_tx_queue **txq =
197                 (struct avf_tx_queue **)dev->data->tx_queues;
198         int i, ret = AVF_SUCCESS;
199
200         for (i = 0; i < dev->data->nb_rx_queues; i++) {
201                 if (!rxq[i] || !rxq[i]->q_set)
202                         continue;
203                 ret = avf_init_rxq(dev, rxq[i]);
204                 if (ret != AVF_SUCCESS)
205                         break;
206         }
207         /* TODO: set rx/tx function to vector/scatter/single-segment
208          * according to parameters
209          */
210         return ret;
211 }
212
213 static int
214 avf_start_queues(struct rte_eth_dev *dev)
215 {
216         struct avf_rx_queue *rxq;
217         struct avf_tx_queue *txq;
218         int i;
219
220         for (i = 0; i < dev->data->nb_tx_queues; i++) {
221                 txq = dev->data->tx_queues[i];
222                 if (txq->tx_deferred_start)
223                         continue;
224                 if (avf_dev_tx_queue_start(dev, i) != 0) {
225                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
226                         return -1;
227                 }
228         }
229
230         for (i = 0; i < dev->data->nb_rx_queues; i++) {
231                 rxq = dev->data->rx_queues[i];
232                 if (rxq->rx_deferred_start)
233                         continue;
234                 if (avf_dev_rx_queue_start(dev, i) != 0) {
235                         PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
236                         return -1;
237                 }
238         }
239
240         return 0;
241 }
242
/**
 * Start the device: initialize the queues, configure RSS and the
 * queue/interrupt mapping through virtchnl, program the MAC addresses,
 * then enable every non-deferred queue.
 *
 * Returns 0 on success, -1 on any failure (partially applied state is
 * rolled back via the error labels at the bottom).
 */
static int
avf_dev_start(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* NOTE(review): pci_dev/intr_handle are currently unused here;
	 * presumably reserved for the Rx-interrupt work flagged by the
	 * TODOs below — confirm before removing.
	 */
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	uint16_t interval;
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	/* One queue "pair" is an Rx and a Tx queue sharing an index. */
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	/* TODO: Rx interrupt */

	if (avf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do Queue init");
		return -1;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (avf_init_rss(adapter) != 0) {
			PMD_DRV_LOG(ERR, "configure rss failed");
			goto err_rss;
		}
	}

	if (avf_configure_queues(adapter) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	/* Map interrupt for writeback */
	vf->nb_msix = 1;
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
		/* If WB_ON_ITR supports, enable it */
		vf->msix_base = AVF_RX_VEC_START;
		AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK |
			      AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
	} else {
		/* If no WB_ON_ITR offload flags, need to set interrupt for
		 * descriptor write back.
		 */
		vf->msix_base = AVF_MISC_VEC_ID;

		/* set ITR to max */
		interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      (AVF_ITR_INDEX_DEFAULT <<
			       AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
			      (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
	}
	AVF_WRITE_FLUSH(hw);
	/* map all queues to the same interrupt vector (bit i = queue i) */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		vf->rxq_map[0] |= 1 << i;
	if (avf_config_irq_map(adapter)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		goto err_queue;
	}

	/* Set all mac addrs */
	avf_add_del_all_mac_addr(adapter, TRUE);

	if (avf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	/* TODO: enable interrupt for RX interrupt */
	return 0;

err_mac:
	/* Roll back the MAC address programming done above. */
	avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
	return -1;
}
330
331 static void
332 avf_dev_stop(struct rte_eth_dev *dev)
333 {
334         struct avf_adapter *adapter =
335                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
336         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev);
337         int ret, i;
338
339         PMD_INIT_FUNC_TRACE();
340
341         if (hw->adapter_stopped == 1)
342                 return;
343
344         avf_stop_queues(dev);
345
346         /*TODO: Disable the interrupt for Rx*/
347
348         /* TODO: Rx interrupt vector mapping free */
349
350         /* remove all mac addrs */
351         avf_add_del_all_mac_addr(adapter, FALSE);
352         hw->adapter_stopped = 1;
353 }
354
355 static void
356 avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
357 {
358         struct avf_adapter *adapter =
359                 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
360         struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
361
362         memset(dev_info, 0, sizeof(*dev_info));
363         dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
364         dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
365         dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
366         dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
367         dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
368         dev_info->hash_key_size = vf->vf_res->rss_key_size;
369         dev_info->reta_size = vf->vf_res->rss_lut_size;
370         dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
371         dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
372         dev_info->rx_offload_capa =
373                 DEV_RX_OFFLOAD_VLAN_STRIP |
374                 DEV_RX_OFFLOAD_IPV4_CKSUM |
375                 DEV_RX_OFFLOAD_UDP_CKSUM |
376                 DEV_RX_OFFLOAD_TCP_CKSUM;
377         dev_info->tx_offload_capa =
378                 DEV_TX_OFFLOAD_VLAN_INSERT |
379                 DEV_TX_OFFLOAD_IPV4_CKSUM |
380                 DEV_TX_OFFLOAD_UDP_CKSUM |
381                 DEV_TX_OFFLOAD_TCP_CKSUM |
382                 DEV_TX_OFFLOAD_SCTP_CKSUM |
383                 DEV_TX_OFFLOAD_TCP_TSO;
384
385         dev_info->default_rxconf = (struct rte_eth_rxconf) {
386                 .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
387                 .rx_drop_en = 0,
388         };
389
390         dev_info->default_txconf = (struct rte_eth_txconf) {
391                 .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
392                 .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
393                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
394                                 ETH_TXQ_FLAGS_NOOFFLOADS,
395         };
396
397         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
398                 .nb_max = AVF_MAX_RING_DESC,
399                 .nb_min = AVF_MIN_RING_DESC,
400                 .nb_align = AVF_ALIGN_RING_DESC,
401         };
402
403         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
404                 .nb_max = AVF_MAX_RING_DESC,
405                 .nb_min = AVF_MIN_RING_DESC,
406                 .nb_align = AVF_ALIGN_RING_DESC,
407         };
408 }
409
410 static int
411 avf_check_vf_reset_done(struct avf_hw *hw)
412 {
413         int i, reset;
414
415         for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
416                 reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
417                         AVFGEN_RSTAT_VFR_STATE_MASK;
418                 reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
419                 if (reset == VIRTCHNL_VFR_VFACTIVE ||
420                     reset == VIRTCHNL_VFR_COMPLETED)
421                         break;
422                 rte_delay_ms(20);
423         }
424
425         if (i >= AVF_RESET_WAIT_CNT)
426                 return -1;
427
428         return 0;
429 }
430
/**
 * Bring up the VF control path: wait for the reset to finish, start
 * the admin queue, negotiate the virtchnl API version, fetch the VF
 * resources from the PF and allocate the RSS key/LUT buffers when the
 * RSS capability is granted.
 *
 * Returns 0 on success, -1 on failure.  On failure everything acquired
 * up to that point is released via the goto-cleanup chain below.
 */
static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int i, err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	/* Buffer for admin-queue responses from the PF. */
	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	/* vf_res holds the resource reply plus its trailing VSI array. */
	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;
/* Cleanup chain: each label releases what was acquired after the
 * previous one (rte_free(NULL) is a safe no-op).
 */
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}
509
/* Enable default admin queue interrupt setting:
 * unmask the admin-queue cause and arm misc interrupt vector 0.
 */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);

	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
					    AVFINT_DYN_CTL01_ITR_INDX_MASK);

	/* Flush posted register writes before returning. */
	AVF_WRITE_FLUSH(hw);
}
522
/* Mask all interrupt causes on misc vector 0 (counterpart of
 * avf_enable_irq0).
 */
static inline void
avf_disable_irq0(struct avf_hw *hw)
{
	/* Disable all interrupt types */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	AVF_WRITE_FLUSH(hw);
}
532
533 static void
534 avf_dev_interrupt_handler(void *param)
535 {
536         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
537         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
538
539         avf_disable_irq0(hw);
540
541         avf_handle_virtchnl_msg(dev);
542
543 done:
544         avf_enable_irq0(hw);
545 }
546
/**
 * Per-port init callback invoked from the PCI probe path: wire up the
 * ops table, mirror PCI identity into the hw struct, bring up the VF
 * control path, set up the port MAC address and register the IRQ0
 * interrupt handler.
 *
 * Returns 0 on success, -1 / -ENOMEM on failure.
 */
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Mirror PCI identity and BAR0 into the shared hw struct used by
	 * the base code.
	 */
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}
610
/**
 * Close the port: stop it, shut the admin queue down, and tear down
 * the interrupt handler (interrupt is disabled before the callback is
 * unregistered so no ISR can run during teardown).
 */
static void
avf_dev_close(struct rte_eth_dev *dev)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	avf_dev_stop(dev);
	avf_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     avf_dev_interrupt_handler, dev);
	avf_disable_irq0(hw);
}
628
629 static int
630 avf_dev_uninit(struct rte_eth_dev *dev)
631 {
632         struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
633         struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
634
635         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
636                 return -EPERM;
637
638         dev->dev_ops = NULL;
639         dev->rx_pkt_burst = NULL;
640         dev->tx_pkt_burst = NULL;
641         if (hw->adapter_stopped == 0)
642                 avf_dev_close(dev);
643
644         rte_free(vf->vf_res);
645         vf->vsi_res = NULL;
646         vf->vf_res = NULL;
647
648         rte_free(vf->aq_resp);
649         vf->aq_resp = NULL;
650
651         rte_free(dev->data->mac_addrs);
652         dev->data->mac_addrs = NULL;
653
654         if (vf->rss_lut) {
655                 rte_free(vf->rss_lut);
656                 vf->rss_lut = NULL;
657         }
658         if (vf->rss_key) {
659                 rte_free(vf->rss_key);
660                 vf->rss_key = NULL;
661         }
662
663         return 0;
664 }
665
/* PCI probe hook: allocate one avf_adapter as ethdev private data and
 * run avf_dev_init() on the new port.
 */
static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct avf_adapter), avf_dev_init);
}
672
/* PCI remove hook: tear the port down through avf_dev_uninit(). */
static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}
677
/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
	.id_table = pci_id_avf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_avf_pci_probe,
	.remove = eth_avf_pci_remove,
};

/* Register the driver with the PCI bus and declare the kernel-module
 * dependency (igb_uio or vfio-pci must be bound to the device).
 */
RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
/* Constructor: register the init/driver log types before main() runs. */
RTE_INIT(avf_init_log);
static void
avf_init_log(void)
{
	avf_logtype_init = rte_log_register("pmd.avf.init");
	if (avf_logtype_init >= 0)
		rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
	avf_logtype_driver = rte_log_register("pmd.avf.driver");
	if (avf_logtype_driver >= 0)
		rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}
700
701 /* memory func for base code */
702 enum avf_status_code
703 avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
704                        struct avf_dma_mem *mem,
705                        u64 size,
706                        u32 alignment)
707 {
708         const struct rte_memzone *mz = NULL;
709         char z_name[RTE_MEMZONE_NAMESIZE];
710
711         if (!mem)
712                 return AVF_ERR_PARAM;
713
714         snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
715         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
716                                          alignment, RTE_PGSIZE_2M);
717         if (!mz)
718                 return AVF_ERR_NO_MEMORY;
719
720         mem->size = size;
721         mem->va = mz->addr;
722         mem->pa = mz->phys_addr;
723         mem->zone = (const void *)mz;
724         PMD_DRV_LOG(DEBUG,
725                     "memzone %s allocated with physical address: %"PRIu64,
726                     mz->name, mem->pa);
727
728         return AVF_SUCCESS;
729 }
730
731 enum avf_status_code
732 avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
733                    struct avf_dma_mem *mem)
734 {
735         if (!mem)
736                 return AVF_ERR_PARAM;
737
738         PMD_DRV_LOG(DEBUG,
739                     "memzone %s to be freed with physical address: %"PRIu64,
740                     ((const struct rte_memzone *)mem->zone)->name, mem->pa);
741         rte_memzone_free((const struct rte_memzone *)mem->zone);
742         mem->zone = NULL;
743         mem->va = NULL;
744         mem->pa = (u64)0;
745
746         return AVF_SUCCESS;
747 }
748
749 enum avf_status_code
750 avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
751                         struct avf_virt_mem *mem,
752                         u32 size)
753 {
754         if (!mem)
755                 return AVF_ERR_PARAM;
756
757         mem->size = size;
758         mem->va = rte_zmalloc("avf", size, 0);
759
760         if (mem->va)
761                 return AVF_SUCCESS;
762         else
763                 return AVF_ERR_NO_MEMORY;
764 }
765
766 enum avf_status_code
767 avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
768                     struct avf_virt_mem *mem)
769 {
770         if (!mem)
771                 return AVF_ERR_PARAM;
772
773         rte_free(mem->va);
774         mem->va = NULL;
775
776         return AVF_SUCCESS;
777 }