net/avf: enable basic Rx Tx
dpdk.git: drivers/net/avf/avf_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "avf_log.h"
#include "base/avf_prototype.h"
#include "base/avf_adminq_cmd.h"
#include "base/avf_type.h"

#include "avf.h"
#include "avf_rxtx.h"

static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);

int avf_logtype_init;
int avf_logtype_driver;

static const struct rte_pci_id pci_id_avf_map[] = {
        { RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops avf_eth_dev_ops = {
        .dev_configure              = avf_dev_configure,
        .dev_start                  = avf_dev_start,
        .dev_stop                   = avf_dev_stop,
        .dev_close                  = avf_dev_close,
        .dev_infos_get              = avf_dev_info_get,
        .dev_supported_ptypes_get   = avf_dev_supported_ptypes_get,
        .rx_queue_start             = avf_dev_rx_queue_start,
        .rx_queue_stop              = avf_dev_rx_queue_stop,
        .tx_queue_start             = avf_dev_tx_queue_start,
        .tx_queue_stop              = avf_dev_tx_queue_stop,
        .rx_queue_setup             = avf_dev_rx_queue_setup,
        .rx_queue_release           = avf_dev_rx_queue_release,
        .tx_queue_setup             = avf_dev_tx_queue_setup,
        .tx_queue_release           = avf_dev_tx_queue_release,
};

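/* Apply the requested configuration. Only VLAN stripping needs action
 * here: it is toggled to match the Rx offload flags when the PF
 * reports the VLAN offload capability.
 */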
static int
avf_dev_configure(struct rte_eth_dev *dev)
{
        struct avf_adapter *ad =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

        /* VLAN stripping setting */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
                if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        avf_enable_vlan_strip(ad);
                else
                        avf_disable_vlan_strip(ad);
        }
        return 0;
}

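/* Initialize RSS for the VF: program the hash key (user-supplied or
 * random) and spread the lookup table over the configured Rx queues,
 * then push both to the PF over virtchnl.
 */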
static int
avf_init_rss(struct avf_adapter *adapter)
{
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
        struct rte_eth_rss_conf *rss_conf;
        uint8_t i, j, nb_q;
        int ret;

        rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
                       AVF_MAX_NUM_QUEUES);

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
                PMD_DRV_LOG(DEBUG, "RSS is not supported");
                return -ENOTSUP;
        }
        if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
                /* set all lut items to default queue */
                for (i = 0; i < vf->vf_res->rss_lut_size; i++)
                        vf->rss_lut[i] = 0;
                ret = avf_configure_rss_lut(adapter);
                return ret;
        }

        /* In AVF, RSS enablement is set by PF driver. It is not supported
         * to set based on rss_conf->rss_hf.
         */

        /* configure RSS key */
        if (!rss_conf->rss_key) {
                /* Calculate a default hash key */
                for (i = 0; i < vf->vf_res->rss_key_size; i++)
                        vf->rss_key[i] = (uint8_t)rte_rand();
        } else {
                rte_memcpy(vf->rss_key, rss_conf->rss_key,
                           RTE_MIN(rss_conf->rss_key_len,
                                   vf->vf_res->rss_key_size));
        }

        /* init RSS LUT table */
        for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
                if (j >= nb_q)
                        j = 0;
                vf->rss_lut[i] = j;
        }
        /* send virtchnl ops to configure RSS */
        ret = avf_configure_rss_lut(adapter);
        if (ret)
                return ret;
        ret = avf_configure_rss_key(adapter);
        if (ret)
                return ret;

        return 0;
}

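/* Per-queue Rx init: validate the maximum packet length against the
 * jumbo-frame setting, decide whether scattered Rx is required, and
 * arm the queue by writing its tail register.
 */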
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_dev_data *dev_data = dev->data;
        uint16_t buf_size, max_pkt_len, len;

        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

        /* Calculate the maximum packet length allowed */
        len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
        max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
         */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (max_pkt_len <= ETHER_MAX_LEN ||
                    max_pkt_len > AVF_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is enabled",
                                    (uint32_t)ETHER_MAX_LEN,
                                    (uint32_t)AVF_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (max_pkt_len < ETHER_MIN_LEN ||
                    max_pkt_len > ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)ETHER_MIN_LEN,
                                    (uint32_t)ETHER_MAX_LEN);
                        return -EINVAL;
                }
        }

        rxq->max_pkt_len = max_pkt_len;
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
                dev_data->scattered_rx = 1;
        }
        AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
        AVF_WRITE_FLUSH(hw);

        return 0;
}

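/* Run Rx init on every queue that has been set up and pick the Rx/Tx
 * burst functions matching the resulting configuration.
 */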
static int
avf_init_queues(struct rte_eth_dev *dev)
{
        struct avf_rx_queue **rxq =
                (struct avf_rx_queue **)dev->data->rx_queues;
        int i, ret = AVF_SUCCESS;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!rxq[i] || !rxq[i]->q_set)
                        continue;
                ret = avf_init_rxq(dev, rxq[i]);
                if (ret != AVF_SUCCESS)
                        break;
        }
        /* set rx/tx function to vector/scatter/single-segment
         * according to parameters
         */
        avf_set_rx_function(dev);
        avf_set_tx_function(dev);

        return ret;
}

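/* Start all Tx queues, then all Rx queues, skipping any queue marked
 * for deferred start.
 */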
static int
avf_start_queues(struct rte_eth_dev *dev)
{
        struct avf_rx_queue *rxq;
        struct avf_tx_queue *txq;
        int i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                if (txq->tx_deferred_start)
                        continue;
                if (avf_dev_tx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
                        return -1;
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                if (rxq->rx_deferred_start)
                        continue;
                if (avf_dev_rx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
                        return -1;
                }
        }

        return 0;
}

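/* Device start: initialize queues and RSS, let the PF configure the
 * queues and the interrupt mapping, program all MAC addresses, then
 * enable the queues.
 */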
static int
avf_dev_start(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t interval;
        int i;

        PMD_INIT_FUNC_TRACE();

        hw->adapter_stopped = 0;

        vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
        vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                      dev->data->nb_tx_queues);

        /* TODO: Rx interrupt */

        if (avf_init_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "failed to initialize queues");
                return -1;
        }

        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                if (avf_init_rss(adapter) != 0) {
                        PMD_DRV_LOG(ERR, "configure RSS failed");
                        goto err_rss;
                }
        }

        if (avf_configure_queues(adapter) != 0) {
                PMD_DRV_LOG(ERR, "configure queues failed");
                goto err_queue;
        }

        /* Map interrupt for descriptor write-back */
        vf->nb_msix = 1;
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
                /* If WB_ON_ITR is supported, enable it */
                vf->msix_base = AVF_RX_VEC_START;
                AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
                              AVFINT_DYN_CTLN1_ITR_INDX_MASK |
                              AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
        } else {
                /* If the WB_ON_ITR offload flag is absent, an interrupt is
                 * needed for descriptor write-back.
                 */
                vf->msix_base = AVF_MISC_VEC_ID;

                /* set ITR to max */
                interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
                AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
                              AVFINT_DYN_CTL01_INTENA_MASK |
                              (AVF_ITR_INDEX_DEFAULT <<
                               AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                              (interval << AVFINT_DYN_CTL01_INTERVAL_SHIFT));
        }
        AVF_WRITE_FLUSH(hw);
        /* map all queues to the same interrupt vector */
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                vf->rxq_map[0] |= 1 << i;
        if (avf_config_irq_map(adapter)) {
                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
                goto err_queue;
        }

        /* Set all MAC addresses */
        avf_add_del_all_mac_addr(adapter, TRUE);

        if (avf_start_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "enable queues failed");
                goto err_mac;
        }

        /* TODO: enable interrupt for RX interrupt */
        return 0;

err_mac:
        avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
        return -1;
}

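/* Device stop: disable all queues and remove the MAC addresses added
 * at start time. Calling it on an already stopped port is a no-op.
 */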
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (hw->adapter_stopped == 1)
                return;

        avf_stop_queues(dev);

        /* TODO: Disable the interrupt for Rx */

        /* TODO: Rx interrupt vector mapping free */

        /* remove all MAC addresses */
        avf_add_del_all_mac_addr(adapter, FALSE);
        hw->adapter_stopped = 1;
}

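/* Report device capabilities: queue limits from the VSI resources,
 * supported offloads, default queue thresholds and descriptor limits.
 */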
static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        memset(dev_info, 0, sizeof(*dev_info));
        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
        dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
        dev_info->hash_key_size = vf->vf_res->rss_key_size;
        dev_info->reta_size = vf->vf_res->rss_lut_size;
        dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
        dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = AVF_MAX_RING_DESC,
                .nb_min = AVF_MIN_RING_DESC,
                .nb_align = AVF_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = AVF_MAX_RING_DESC,
                .nb_min = AVF_MIN_RING_DESC,
                .nb_align = AVF_ALIGN_RING_DESC,
        };
}

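/* List the packet types the Rx burst functions can report */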
static const uint32_t *
avf_dev_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };
        return ptypes;
}

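/* Poll AVFGEN_RSTAT until the PF signals that the VF reset finished;
 * give up after AVF_RESET_WAIT_CNT polls.
 */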
static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
        int i, reset;

        for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
                reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
                        AVFGEN_RSTAT_VFR_STATE_MASK;
                reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
                if (reset == VIRTCHNL_VFR_VFACTIVE ||
                    reset == VIRTCHNL_VFR_COMPLETED)
                        break;
                rte_delay_ms(20);
        }

        if (i >= AVF_RESET_WAIT_CNT)
                return -1;

        return 0;
}

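/* Bring up the VF control path: wait for reset completion, initialize
 * the admin queue, negotiate the virtchnl API version, fetch the VF
 * resources from the PF and allocate the RSS state they describe.
 */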
static int
avf_init_vf(struct rte_eth_dev *dev)
{
        int err, bufsz;
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        err = avf_set_mac_type(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
                goto err;
        }

        err = avf_check_vf_reset_done(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "VF is still resetting");
                goto err;
        }

        avf_init_adminq_parameter(hw);
        err = avf_init_adminq(hw);
        if (err) {
                PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
                goto err;
        }

        vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
        if (!vf->aq_resp) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
                goto err_aq;
        }
        if (avf_check_api_version(adapter) != 0) {
                PMD_INIT_LOG(ERR, "check_api_version failed");
                goto err_api;
        }

        bufsz = sizeof(struct virtchnl_vf_resource) +
                (AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
        vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
        if (!vf->vf_res) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
                goto err_api;
        }
        if (avf_get_vf_resource(adapter) != 0) {
                PMD_INIT_LOG(ERR, "avf_get_vf_resource failed");
                goto err_alloc;
        }
        /* Allocate memory for RSS info */
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                vf->rss_key = rte_zmalloc("rss_key",
                                          vf->vf_res->rss_key_size, 0);
                if (!vf->rss_key) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
                        goto err_rss;
                }
                vf->rss_lut = rte_zmalloc("rss_lut",
                                          vf->vf_res->rss_lut_size, 0);
                if (!vf->rss_lut) {
                        PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
                        goto err_rss;
                }
        }
        return 0;
err_rss:
        rte_free(vf->rss_key);
        rte_free(vf->rss_lut);
err_alloc:
        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
err_api:
        rte_free(vf->aq_resp);
err_aq:
        avf_shutdown_adminq(hw);
err:
        return -1;
}

/* Enable default admin queue interrupt setting */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
        /* Enable admin queue interrupt trigger */
        AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);

        AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
                                            AVFINT_DYN_CTL01_ITR_INDX_MASK);

        AVF_WRITE_FLUSH(hw);
}

static inline void
avf_disable_irq0(struct avf_hw *hw)
{
        /* Disable all interrupt types */
        AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
        AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
                      AVFINT_DYN_CTL01_ITR_INDX_MASK);
        AVF_WRITE_FLUSH(hw);
}

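/* Admin queue interrupt handler: mask IRQ0 while the pending virtchnl
 * message is handled, then unmask it again.
 */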
static void
avf_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        avf_disable_irq0(hw);

        avf_handle_virtchnl_msg(dev);

        avf_enable_irq0(hw);
}

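/* ethdev init: hook up the ops and burst functions, fill the hw struct
 * from the PCI device, bring up the VF control path, set up the MAC
 * address and register the admin queue interrupt handler.
 */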
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct avf_adapter *adapter =
                AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        PMD_INIT_FUNC_TRACE();

        /* assign ops func pointer */
        eth_dev->dev_ops = &avf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &avf_recv_pkts;
        eth_dev->tx_pkt_burst = &avf_xmit_pkts;
        eth_dev->tx_pkt_prepare = &avf_prep_pkts;

        /* For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check if we need a different RX
         * and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                avf_set_rx_function(eth_dev);
                avf_set_tx_function(eth_dev);
                return 0;
        }
        rte_eth_copy_pci_info(eth_dev, pci_dev);

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.bus_id = pci_dev->addr.bus;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
        hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        adapter->eth_dev = eth_dev;

        if (avf_init_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "Init vf failed");
                return -1;
        }

        /* copy mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc(
                                        "avf_mac",
                                        ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
                                        0);
        if (!eth_dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
                             " store MAC addresses",
                             ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
                return -ENOMEM;
        }
        /* If the MAC address is not configured by host,
         * generate a random one.
         */
        if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
                eth_random_addr(hw->mac.addr);
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* register callback func to eal lib */
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   avf_dev_interrupt_handler,
                                   (void *)eth_dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&pci_dev->intr_handle);

        /* configure and enable device interrupt */
        avf_enable_irq0(hw);

        return 0;
}

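/* Close the device: stop it, shut down the admin queue and tear down
 * the interrupt callback registered in avf_dev_init.
 */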
static void
avf_dev_close(struct rte_eth_dev *dev)
{
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        avf_dev_stop(dev);
        avf_shutdown_adminq(hw);
        /* disable uio intr before callback unregister */
        rte_intr_disable(intr_handle);

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(intr_handle,
                                     avf_dev_interrupt_handler, dev);
        avf_disable_irq0(hw);
}

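/* ethdev uninit (primary process only): close the device if it is
 * still running and free everything allocated during init.
 */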
static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
        struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;
        if (hw->adapter_stopped == 0)
                avf_dev_close(dev);

        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
        vf->vf_res = NULL;

        rte_free(vf->aq_resp);
        vf->aq_resp = NULL;

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        if (vf->rss_lut) {
                rte_free(vf->rss_lut);
                vf->rss_lut = NULL;
        }
        if (vf->rss_key) {
                rte_free(vf->rss_key);
                vf->rss_key = NULL;
        }

        return 0;
}

static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                             struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct avf_adapter), avf_dev_init);
}

static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
        .id_table = pci_id_avf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_avf_pci_probe,
        .remove = eth_avf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");

RTE_INIT(avf_init_log);
static void
avf_init_log(void)
{
        avf_logtype_init = rte_log_register("pmd.avf.init");
        if (avf_logtype_init >= 0)
                rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
        avf_logtype_driver = rte_log_register("pmd.avf.driver");
        if (avf_logtype_driver >= 0)
                rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}

/* memory functions for base code */
enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
                       struct avf_dma_mem *mem,
                       u64 size,
                       u32 alignment)
{
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        if (!mem)
                return AVF_ERR_PARAM;

        snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
                                         alignment, RTE_PGSIZE_2M);
        if (!mz)
                return AVF_ERR_NO_MEMORY;

        mem->size = size;
        mem->va = mz->addr;
        mem->pa = mz->phys_addr;
        mem->zone = (const void *)mz;
        PMD_DRV_LOG(DEBUG,
                    "memzone %s allocated with physical address: %"PRIu64,
                    mz->name, mem->pa);

        return AVF_SUCCESS;
}

enum avf_status_code
avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
                   struct avf_dma_mem *mem)
{
        if (!mem)
                return AVF_ERR_PARAM;

        PMD_DRV_LOG(DEBUG,
                    "memzone %s to be freed with physical address: %"PRIu64,
                    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
        rte_memzone_free((const struct rte_memzone *)mem->zone);
        mem->zone = NULL;
        mem->va = NULL;
        mem->pa = (u64)0;

        return AVF_SUCCESS;
}

enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
                        struct avf_virt_mem *mem,
                        u32 size)
{
        if (!mem)
                return AVF_ERR_PARAM;

        mem->size = size;
        mem->va = rte_zmalloc("avf", size, 0);

        if (mem->va)
                return AVF_SUCCESS;
        else
                return AVF_ERR_NO_MEMORY;
}

enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
                    struct avf_virt_mem *mem)
{
        if (!mem)
                return AVF_ERR_PARAM;

        rte_free(mem->va);
        mem->va = NULL;

        return AVF_SUCCESS;
}