/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Max try times to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US 100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
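/*
 * Note (added for clarity): since sizeof(uint32_t) is 4, CHARS_PER_UINT32
 * evaluates to 4 and BIT_MASK_PER_UINT32 to (1 << 4) - 1 == 0xF, i.e. one
 * mask bit for each of the four byte lanes packed into a 32-bit RETA
 * register. The RETA update/query code below uses it exactly that way.
 */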
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add);
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
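/*
 * Usage pattern (added for clarity), as seen throughout this file:
 *
 *	fm10k_mbx_lock(hw);
 *	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, mode);
 *	fm10k_mbx_unlock(hw);
 *
 * The trylock/delay loop above busy-waits because the mailbox is shared
 * with the interrupt handlers, which also take this lock.
 */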
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 *
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	uint64_t dma_addr;
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

	q->next_trigger = q->alloc_thresh - 1;
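	/*
	 * Note (added for clarity): writing nb_desc - 1 rather than nb_desc
	 * to the tail pointer leaves one descriptor unused; this is a common
	 * ring convention that keeps head == tail unambiguous ("empty" vs
	 * "full").
	 */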
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);

	return 0;
}
/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		rx_queue_clean(q);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}
167 * disable RX queue, wait unitl HW finished necessary flush operation
170 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
174 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
175 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
176 reg & ~FM10K_RXQCTL_ENABLE);
178 /* Wait 100us at most */
179 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
181 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i));
182 if (!(reg & FM10K_RXQCTL_ENABLE))
186 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
193 * reset queue to initial state, allocate software buffers used when starting
197 tx_queue_reset(struct fm10k_tx_queue *q)
199 PMD_INIT_FUNC_TRACE();
203 q->nb_free = q->nb_desc - 1;
204 q->free_trigger = q->nb_free - q->free_thresh;
205 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
206 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
210 * clean queue, descriptor rings, free software buffers used when stopping
214 tx_queue_clean(struct fm10k_tx_queue *q)
216 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
218 PMD_INIT_FUNC_TRACE();
220 /* zero descriptor rings */
221 for (i = 0; i < q->nb_desc; ++i)
222 q->hw_ring[i] = zero;
224 /* free software buffers */
225 for (i = 0; i < q->nb_desc; ++i) {
227 rte_pktmbuf_free_seg(q->sw_ring[i]);
228 q->sw_ring[i] = NULL;
234 * free all queue memory used when releasing the queue (i.e. configure)
237 tx_queue_free(struct fm10k_tx_queue *q)
239 PMD_INIT_FUNC_TRACE();
241 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
243 if (q->rs_tracker.list) {
244 rte_free(q->rs_tracker.list);
245 q->rs_tracker.list = NULL;
248 rte_free(q->sw_ring);
257 * disable TX queue, wait unitl HW finished necessary flush operation
260 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
264 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
265 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
266 reg & ~FM10K_TXDCTL_ENABLE);
268 /* Wait 100us at most */
269 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
271 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
272 if (!(reg & FM10K_TXDCTL_ENABLE))
276 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

	return 0;
}
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint32_t mrqc, *key, i, reta, j;
	uint64_t hf;

#define RSS_KEY_SIZE 40
	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
	};

	if (dev->data->nb_rx_queues == 1 ||
	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
		return;
315 /* random key is rss_intel_key (default) or user provided (rss_key) */
316 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
317 key = (uint32_t *)rss_intel_key;
319 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
321 /* Now fill our hash function seeds, 4 bytes at a time */
322 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
323 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
326 * Fill in redirection table
327 * The byte-swap is needed because NIC registers are in
328 * little-endian order.
331 for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
332 if (j == dev->data->nb_rx_queues)
334 reta = (reta << CHAR_BIT) | j;
336 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
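	/*
	 * Worked example (added for clarity): with 4 RX queues the loop
	 * produces the repeating queue sequence 0,1,2,3 and packs four 8-bit
	 * queue indices into each 32-bit RETA register, which is why a
	 * register is written only on every fourth entry; i >> 2 selects the
	 * register, one per four entries.
	 */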
341 * Generate RSS hash based on packet types, TCP/UDP
342 * port numbers and/or IPv4/v6 src and dst addresses
344 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
346 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
347 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
348 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
349 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
350 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
351 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
352 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
353 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
354 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
357 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
362 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret = 0;
	struct fm10k_tx_queue *txq;
	uint64_t base_addr;
	uint32_t size;

	/* Disable TXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
				3 << FM10K_TXINT_TIMER_SHIFT);

	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
		txq = dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;
		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

		/* disable queue to avoid issues while updating state */
		ret = tx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* set location and size for descriptor ring */
393 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
394 base_addr & UINT64_LOWER_32BITS_MASK);
395 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
396 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
397 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
403 fm10k_dev_rx_init(struct rte_eth_dev *dev)
405 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
407 struct fm10k_rx_queue *rxq;
410 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
413 /* Disable RXINT to avoid possible interrupt */
414 for (i = 0; i < hw->mac.max_queues; i++)
415 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
416 3 << FM10K_RXINT_TIMER_SHIFT);
418 /* Setup RX queues */
419 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
420 rxq = dev->data->rx_queues[i];
421 base_addr = rxq->hw_ring_phys_addr;
422 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
424 /* disable queue to avoid issues while updating state */
425 ret = rx_queue_disable(hw, i);
427 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
431 /* Setup the Base and Length of the Rx Descriptor Ring */
432 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
433 base_addr & UINT64_LOWER_32BITS_MASK);
434 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
435 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
436 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
438 /* Configure the Rx buffer size for one buff without split */
439 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
440 RTE_PKTMBUF_HEADROOM);
441 /* As RX buffer is aligned to 512B within mbuf, some bytes are
442 * reserved for this purpose, and the worst case could be 511B.
443 * But SRR reg assumes all buffers have the same size. In order
444 * to fill the gap, we'll have to consider the worst case and
445 * assume 512B is reserved. If we don't do so, it's possible
446 * for HW to overwrite data to next mbuf.
448 buf_size -= FM10K_RX_DATABUF_ALIGN;
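		/*
		 * Worked example (added for clarity, assuming a typical
		 * 2048B mbuf data room with 128B of headroom): buf_size
		 * becomes 2048 - 128 - 512 = 1408B, and the SRRCTL write
		 * below stores it in units of
		 * (1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) bytes.
		 */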
		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

		/* Add dual VLAN length to the buffer size check to support
		 * double VLAN tagging; enable scattered RX if the maximum
		 * packet cannot fit in a single buffer.
		 */
		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
				dev->data->dev_conf.rxmode.enable_scatter) {
			uint32_t reg;
			dev->data->scattered_rx = 1;
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
			reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
			FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
		}

		/* Enable drop on empty, it's RO for VF */
		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
		FM10K_WRITE_FLUSH(hw);
	}

	/* Configure RSS if applicable */
	fm10k_dev_mq_rx_configure(dev);
	return 0;
}
479 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
481 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
484 struct fm10k_rx_queue *rxq;
486 PMD_INIT_FUNC_TRACE();
488 if (rx_queue_id < dev->data->nb_rx_queues) {
489 rxq = dev->data->rx_queues[rx_queue_id];
490 err = rx_queue_reset(rxq);
491 if (err == -ENOMEM) {
492 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
494 } else if (err == -EINVAL) {
495 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
500 /* Setup the HW Rx Head and Tail Descriptor Pointers
501 * Note: this must be done AFTER the queue is enabled on real
502 * hardware, but BEFORE the queue is enabled when using the
503 * emulation platform. Do it in both places for now and remove
504 * this comment and the following two register writes when the
505 * emulation platform is no longer being used.
507 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
508 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
510 /* Set PF ownership flag for PF devices */
511 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
512 if (hw->mac.type == fm10k_mac_pf)
513 reg |= FM10K_RXQCTL_PF;
514 reg |= FM10K_RXQCTL_ENABLE;
515 /* enable RX queue */
516 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
517 FM10K_WRITE_FLUSH(hw);
519 /* Setup the HW Rx Head and Tail Descriptor Pointers
520 * Note: this must be done AFTER the queue is enabled
522 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
523 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
530 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
532 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
534 PMD_INIT_FUNC_TRACE();
536 if (rx_queue_id < dev->data->nb_rx_queues) {
537 /* Disable RX queue */
538 rx_queue_disable(hw, rx_queue_id);
540 /* Free mbuf and clean HW ring */
541 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
548 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
550 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
551 /** @todo - this should be defined in the shared code */
552 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
553 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
556 PMD_INIT_FUNC_TRACE();
558 if (tx_queue_id < dev->data->nb_tx_queues) {
559 tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
561 /* reset head and tail pointers */
562 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
563 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
565 /* enable TX queue */
566 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
567 FM10K_TXDCTL_ENABLE | txdctl);
568 FM10K_WRITE_FLUSH(hw);
576 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
578 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
580 PMD_INIT_FUNC_TRACE();
582 if (tx_queue_id < dev->data->nb_tx_queues) {
583 tx_queue_disable(hw, tx_queue_id);
584 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
590 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
592 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
593 != FM10K_DGLORTMAP_NONE);
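/*
 * Note (added for clarity): FM10K_DGLORTMAP_NONE in dglort_map means the
 * switch manager has not yet assigned this port a glort (global logical
 * port) range, so the filter and xcast-mode updates below, which all
 * require a valid glort, are skipped until it does.
 */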
static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range hasn't been acquired yet */
	if (!fm10k_glort_valid(hw))
		return;

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_PROMISC);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}
618 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
620 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
624 PMD_INIT_FUNC_TRACE();
626 /* Return if it didn't acquire valid glort range */
627 if (!fm10k_glort_valid(hw))
630 if (dev->data->all_multicast == 1)
631 mode = FM10K_XCAST_MODE_ALLMULTI;
633 mode = FM10K_XCAST_MODE_NONE;
636 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
638 fm10k_mbx_unlock(hw);
640 if (status != FM10K_SUCCESS)
641 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range hasn't been acquired yet */
	if (!fm10k_glort_valid(hw))
		return;

	/* If promiscuous mode is enabled, it doesn't make sense to enable
	 * allmulticast and disable promiscuous since fm10k can only select
	 * one of the modes at a time.
	 */
	if (dev->data->promiscuous) {
		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
			"no need to enable allmulticast");
		return;
	}

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_ALLMULTI);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}
static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if a valid glort range hasn't been acquired yet */
	if (!fm10k_glort_valid(hw))
		return;

	if (dev->data->promiscuous) {
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
			"since promisc mode is enabled");
		return;
	}

	fm10k_mbx_lock(hw);
	/* Change mode to unicast mode */
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_NONE);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}
/* fls = find last set bit = 32 minus the number of leading zeros */
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
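/*
 * Examples (added for clarity): fls(0) == 0, fls(1) == 1, fls(8) == 4,
 * fls(16) == 5. fm10k_dev_start() below uses fls(nb_rx_queues - 1) to
 * size the RSS field written to the DGLORT decoder.
 */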
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	/* stop, init, then start the hw */
	diag = fm10k_stop_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_start_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_dev_tx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
		return diag;
	}

	diag = fm10k_dev_rx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
		return diag;
	}
747 if (hw->mac.type == fm10k_mac_pf) {
748 /* Establish only VSI 0 as valid */
749 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
751 /* Configure RSS bits used in RETA table */
752 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
753 fls(dev->data->nb_rx_queues - 1) <<
754 FM10K_DGLORTDEC_RSSLENGTH_SHIFT);
756 /* Invalidate all other GLORT entries */
757 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
758 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
759 FM10K_DGLORTMAP_NONE);
762 for (i = 0; i < dev->data->nb_rx_queues; i++) {
763 struct fm10k_rx_queue *rxq;
764 rxq = dev->data->rx_queues[i];
766 if (rxq->rx_deferred_start)
768 diag = fm10k_dev_rx_queue_start(dev, i);
771 for (j = 0; j < i; ++j)
772 rx_queue_clean(dev->data->rx_queues[j]);
777 for (i = 0; i < dev->data->nb_tx_queues; i++) {
778 struct fm10k_tx_queue *txq;
779 txq = dev->data->tx_queues[i];
781 if (txq->tx_deferred_start)
783 diag = fm10k_dev_tx_queue_start(dev, i);
786 for (j = 0; j < dev->data->nb_rx_queues; ++j)
787 rx_queue_clean(dev->data->rx_queues[j]);
792 if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID) {
793 /* Update default vlan */
794 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
796 /* Add default mac/vlan filter to PF/Switch manager */
797 fm10k_MAC_filter_set(dev, hw->mac.addr, true);
804 fm10k_dev_stop(struct rte_eth_dev *dev)
808 PMD_INIT_FUNC_TRACE();
810 for (i = 0; i < dev->data->nb_tx_queues; i++)
811 fm10k_dev_tx_queue_stop(dev, i);
813 for (i = 0; i < dev->data->nb_rx_queues; i++)
814 fm10k_dev_rx_queue_stop(dev, i);
818 fm10k_dev_close(struct rte_eth_dev *dev)
820 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
822 PMD_INIT_FUNC_TRACE();
824 /* Stop mailbox service first */
825 fm10k_close_mbx_service(hw);
831 fm10k_link_update(struct rte_eth_dev *dev,
832 __rte_unused int wait_to_complete)
834 PMD_INIT_FUNC_TRACE();
836 /* The host-interface link is always up. The speed is ~50Gbps per Gen3
837 * x8 PCIe interface. For now, we leave the speed undefined since there
838 * is no 50Gbps Ethernet. */
839 dev->data->dev_link.link_speed = 0;
840 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
841 dev->data->dev_link.link_status = 1;
847 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
849 uint64_t ipackets, opackets, ibytes, obytes;
850 struct fm10k_hw *hw =
851 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
852 struct fm10k_hw_stats *hw_stats =
853 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
856 PMD_INIT_FUNC_TRACE();
858 fm10k_update_hw_stats(hw, hw_stats);
860 ipackets = opackets = ibytes = obytes = 0;
861 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
862 (i < hw->mac.max_queues); ++i) {
863 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
864 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
865 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
866 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
867 ipackets += stats->q_ipackets[i];
868 opackets += stats->q_opackets[i];
869 ibytes += stats->q_ibytes[i];
870 obytes += stats->q_obytes[i];
872 stats->ipackets = ipackets;
873 stats->opackets = opackets;
874 stats->ibytes = ibytes;
875 stats->obytes = obytes;
879 fm10k_stats_reset(struct rte_eth_dev *dev)
881 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
882 struct fm10k_hw_stats *hw_stats =
883 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
885 PMD_INIT_FUNC_TRACE();
887 memset(hw_stats, 0, sizeof(*hw_stats));
888 fm10k_rebind_hw_stats(hw, hw_stats);
892 fm10k_dev_infos_get(struct rte_eth_dev *dev,
893 struct rte_eth_dev_info *dev_info)
895 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
897 PMD_INIT_FUNC_TRACE();
899 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
900 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
901 dev_info->max_rx_queues = hw->mac.max_queues;
902 dev_info->max_tx_queues = hw->mac.max_queues;
903 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
904 dev_info->max_hash_mac_addrs = 0;
905 dev_info->max_vfs = dev->pci_dev->max_vfs;
906 dev_info->max_vmdq_pools = ETH_64_POOLS;
907 dev_info->rx_offload_capa =
908 DEV_RX_OFFLOAD_IPV4_CKSUM |
909 DEV_RX_OFFLOAD_UDP_CKSUM |
910 DEV_RX_OFFLOAD_TCP_CKSUM;
911 dev_info->tx_offload_capa = 0;
912 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
914 dev_info->default_rxconf = (struct rte_eth_rxconf) {
916 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
917 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
918 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
920 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
924 dev_info->default_txconf = (struct rte_eth_txconf) {
926 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
927 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
928 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
930 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
931 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
932 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
933 ETH_TXQ_FLAGS_NOOFFLOADS,
939 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
942 uint16_t mac_num = 0;
943 uint32_t vid_idx, vid_bit, mac_index;
945 struct fm10k_macvlan_filter_info *macvlan;
946 struct rte_eth_dev_data *data = dev->data;
948 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
949 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
951 /* @todo - add support for the VF */
952 if (hw->mac.type != fm10k_mac_pf) {
953 PMD_INIT_LOG(ERR, "VLAN filter not available on VF");
957 if (vlan_id > ETH_VLAN_ID_MAX) {
958 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
962 vid_idx = FM10K_VFTA_IDX(vlan_id);
963 vid_bit = FM10K_VFTA_BIT(vlan_id);
964 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
965 if (on && (macvlan->vfta[vid_idx] & vid_bit))
967 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
968 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
969 PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
970 "in the VLAN filter table");
975 result = fm10k_update_vlan(hw, vlan_id, 0, on);
976 fm10k_mbx_unlock(hw);
977 if (result != FM10K_SUCCESS) {
978 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
982 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
983 (result == FM10K_SUCCESS); mac_index++) {
984 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
986 if (mac_num > macvlan->mac_num - 1) {
987 PMD_INIT_LOG(ERR, "MAC address number "
992 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
993 data->mac_addrs[mac_index].addr_bytes,
995 fm10k_mbx_unlock(hw);
998 if (result != FM10K_SUCCESS) {
999 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1004 macvlan->vlan_num++;
1005 macvlan->vfta[vid_idx] |= vid_bit;
1007 macvlan->vlan_num--;
1008 macvlan->vfta[vid_idx] &= ~vid_bit;
/* Add/Remove a MAC address, and update filters */
static int
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
{
	uint32_t i = 0, j, k;
	struct fm10k_hw *hw;
	struct fm10k_macvlan_filter_info *macvlan;

	hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

	/* @todo - add support for the VF */
	if (hw->mac.type != fm10k_mac_pf) {
		PMD_INIT_LOG(ERR, "MAC filter not available on VF");
		return -ENOTSUP;
	}

	for (j = 0; j < FM10K_VFTA_SIZE; j++) {
		if (macvlan->vfta[j]) {
			for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
				if (macvlan->vfta[j] & (1 << k)) {
					if (i + 1 > macvlan->vlan_num) {
						PMD_INIT_LOG(ERR, "vlan number "
							"does not match");
						return -EINVAL;
					}
					fm10k_mbx_lock(hw);
					fm10k_update_uc_addr(hw,
						hw->mac.dglort_map, mac,
						j * FM10K_UINT32_BIT_SIZE + k,
						add, 0);
					fm10k_mbx_unlock(hw);
					i++;
				}
			}
		}
	}

	if (add)
		macvlan->mac_num++;
	else
		macvlan->mac_num--;

	return 0;
}
/* Add a MAC address, and update filters */
static void
fm10k_macaddr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		__rte_unused uint32_t index,
		__rte_unused uint32_t pool)
{
	fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE);
}

/* Remove a MAC address, and update filters */
static void
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct rte_eth_dev_data *data = dev->data;

	if (index < FM10K_MAX_MACADDR_NUM)
		fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
				FALSE);
}
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;
	else
		return 0;
}
/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
	uint8_t port_id, uint16_t queue_id, int socket_id,
	uint32_t size, uint32_t align)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
		 driver_name, ring_name, port_id, queue_id, socket_id);

	/* return the memzone if it already exists */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request < min) || (request > max) || ((div % request) != 0))
		return -1;
	else
		return 0;
}
static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

	return 0;
}
1159 * Hardware requires specific alignment for Rx packet buffers. At
1160 * least one of the following two conditions must be satisfied.
1161 * 1. Address is 512B aligned
1162 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1164 * As such, the driver may need to adjust the DMA address within the
1165 * buffer by up to 512B.
1167 * return 1 if the element size is valid, otherwise return 0.
1170 mempool_element_size_valid(struct rte_mempool *mp)
1174 /* elt_size includes mbuf header and headroom */
1175 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1176 RTE_PKTMBUF_HEADROOM;
1178 /* account for up to 512B of alignment */
1179 min_size -= FM10K_RX_DATABUF_ALIGN;
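	/*
	 * Worked example (added for clarity, assuming the common case of a
	 * 2048B data room): min_size = 2048 - 512 = 1536B after the
	 * alignment reserve, which still fits a standard 1518B Ethernet
	 * frame.
	 */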
1181 /* sanity check for overflow */
1182 if (min_size > mp->elt_size)
1190 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1191 uint16_t nb_desc, unsigned int socket_id,
1192 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1194 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1195 struct fm10k_rx_queue *q;
1196 const struct rte_memzone *mz;
1198 PMD_INIT_FUNC_TRACE();
1200 /* make sure the mempool element size can account for alignment. */
1201 if (!mempool_element_size_valid(mp)) {
1202 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1206 /* make sure a valid number of descriptors have been requested */
1207 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1208 FM10K_MULT_RX_DESC, nb_desc)) {
1209 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1210 "less than or equal to %"PRIu32", "
1211 "greater than or equal to %u, "
1212 "and a multiple of %u",
1213 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1214 FM10K_MULT_RX_DESC);
1219 * if this queue existed already, free the associated memory. The
1220 * queue cannot be reused in case we need to allocate memory on
1221 * different socket than was previously used.
1223 if (dev->data->rx_queues[queue_id] != NULL) {
1224 rx_queue_free(dev->data->rx_queues[queue_id]);
1225 dev->data->rx_queues[queue_id] = NULL;
1228 /* allocate memory for the queue structure */
1229 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1232 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1238 q->nb_desc = nb_desc;
1239 q->port_id = dev->data->port_id;
1240 q->queue_id = queue_id;
1241 q->tail_ptr = (volatile uint32_t *)
1242 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1243 if (handle_rxconf(q, conf))
1246 /* allocate memory for the software ring */
1247 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1248 nb_desc * sizeof(struct rte_mbuf *),
1249 RTE_CACHE_LINE_SIZE, socket_id);
1250 if (q->sw_ring == NULL) {
1251 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1257 * allocate memory for the hardware descriptor ring. A memzone large
1258 * enough to hold the maximum ring size is requested to allow for
1259 * resizing in later calls to the queue setup function.
1261 mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1262 dev->data->port_id, queue_id, socket_id,
1263 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1265 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1266 rte_free(q->sw_ring);
1270 q->hw_ring = mz->addr;
1271 q->hw_ring_phys_addr = mz->phys_addr;
1273 dev->data->rx_queues[queue_id] = q;
1278 fm10k_rx_queue_release(void *queue)
1280 PMD_INIT_FUNC_TRACE();
1282 rx_queue_free(queue);
1286 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1288 uint16_t tx_free_thresh;
1289 uint16_t tx_rs_thresh;
1291 /* constraint MACROs require that tx_free_thresh is configured
1292 * before tx_rs_thresh */
1293 if (conf->tx_free_thresh == 0)
1294 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1296 tx_free_thresh = conf->tx_free_thresh;
1298 /* make sure the requested threshold satisfies the constraints */
1299 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1300 FM10K_TX_FREE_THRESH_MAX(q),
1301 FM10K_TX_FREE_THRESH_DIV(q),
1303 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1304 "less than or equal to %u, "
1305 "greater than or equal to %u, "
1306 "and a divisor of %u",
1307 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1308 FM10K_TX_FREE_THRESH_MIN(q),
1309 FM10K_TX_FREE_THRESH_DIV(q));
1313 q->free_thresh = tx_free_thresh;
1315 if (conf->tx_rs_thresh == 0)
1316 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1318 tx_rs_thresh = conf->tx_rs_thresh;
1320 q->tx_deferred_start = conf->tx_deferred_start;
1322 /* make sure the requested threshold satisfies the constraints */
1323 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1324 FM10K_TX_RS_THRESH_MAX(q),
1325 FM10K_TX_RS_THRESH_DIV(q),
1327 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1328 "less than or equal to %u, "
1329 "greater than or equal to %u, "
1330 "and a divisor of %u",
1331 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1332 FM10K_TX_RS_THRESH_MIN(q),
1333 FM10K_TX_RS_THRESH_DIV(q));
1337 q->rs_thresh = tx_rs_thresh;
1343 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1344 uint16_t nb_desc, unsigned int socket_id,
1345 const struct rte_eth_txconf *conf)
1347 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1348 struct fm10k_tx_queue *q;
1349 const struct rte_memzone *mz;
1351 PMD_INIT_FUNC_TRACE();
1353 /* make sure a valid number of descriptors have been requested */
1354 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1355 FM10K_MULT_TX_DESC, nb_desc)) {
1356 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1357 "less than or equal to %"PRIu32", "
1358 "greater than or equal to %u, "
1359 "and a multiple of %u",
1360 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1361 FM10K_MULT_TX_DESC);
1366 * if this queue existed already, free the associated memory. The
1367 * queue cannot be reused in case we need to allocate memory on
1368 * different socket than was previously used.
1370 if (dev->data->tx_queues[queue_id] != NULL) {
1371 tx_queue_free(dev->data->tx_queues[queue_id]);
1372 dev->data->tx_queues[queue_id] = NULL;
1375 /* allocate memory for the queue structure */
1376 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1379 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1384 q->nb_desc = nb_desc;
1385 q->port_id = dev->data->port_id;
1386 q->queue_id = queue_id;
1387 q->tail_ptr = (volatile uint32_t *)
1388 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1389 if (handle_txconf(q, conf))
1392 /* allocate memory for the software ring */
1393 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1394 nb_desc * sizeof(struct rte_mbuf *),
1395 RTE_CACHE_LINE_SIZE, socket_id);
1396 if (q->sw_ring == NULL) {
1397 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1403 * allocate memory for the hardware descriptor ring. A memzone large
1404 * enough to hold the maximum ring size is requested to allow for
1405 * resizing in later calls to the queue setup function.
1407 mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1408 dev->data->port_id, queue_id, socket_id,
1409 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1411 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1412 rte_free(q->sw_ring);
1416 q->hw_ring = mz->addr;
1417 q->hw_ring_phys_addr = mz->phys_addr;
1420 * allocate memory for the RS bit tracker. Enough slots to hold the
1421 * descriptor index for each RS bit needing to be set are required.
1423 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1424 ((nb_desc + 1) / q->rs_thresh) *
1426 RTE_CACHE_LINE_SIZE, socket_id);
1427 if (q->rs_tracker.list == NULL) {
1428 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1429 rte_free(q->sw_ring);
1434 dev->data->tx_queues[queue_id] = q;
1439 fm10k_tx_queue_release(void *queue)
1441 PMD_INIT_FUNC_TRACE();
1443 tx_queue_free(queue);
1447 fm10k_reta_update(struct rte_eth_dev *dev,
1448 struct rte_eth_rss_reta_entry64 *reta_conf,
1451 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1452 uint16_t i, j, idx, shift;
1456 PMD_INIT_FUNC_TRACE();
1458 if (reta_size > FM10K_MAX_RSS_INDICES) {
1459 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1460 "(%d) doesn't match the number hardware can supported "
1461 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1466 * Update Redirection Table RETA[n], n=0..31. The redirection table has
1467 * 128-entries in 32 registers
1469 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1470 idx = i / RTE_RETA_GROUP_SIZE;
1471 shift = i % RTE_RETA_GROUP_SIZE;
1472 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1473 BIT_MASK_PER_UINT32);
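		/*
		 * Note (added for clarity): each 32-bit RETA register packs
		 * four 8-bit entries. When only some of the four byte lanes
		 * are selected by the mask, the register is first read back
		 * so the untouched lanes are preserved (read-modify-write);
		 * a full 0xF mask makes the read unnecessary.
		 */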
1478 if (mask != BIT_MASK_PER_UINT32)
1479 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1481 for (j = 0; j < CHARS_PER_UINT32; j++) {
1482 if (mask & (0x1 << j)) {
1484 reta &= ~(UINT8_MAX << CHAR_BIT * j);
1485 reta |= reta_conf[idx].reta[shift + j] <<
1489 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1496 fm10k_reta_query(struct rte_eth_dev *dev,
1497 struct rte_eth_rss_reta_entry64 *reta_conf,
1500 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1501 uint16_t i, j, idx, shift;
1505 PMD_INIT_FUNC_TRACE();
1507 if (reta_size < FM10K_MAX_RSS_INDICES) {
1508 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1509 "(%d) doesn't match the number hardware can supported "
1510 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1515 * Read Redirection Table RETA[n], n=0..31. The redirection table has
1516 * 128-entries in 32 registers
1518 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1519 idx = i / RTE_RETA_GROUP_SIZE;
1520 shift = i % RTE_RETA_GROUP_SIZE;
1521 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1522 BIT_MASK_PER_UINT32);
1526 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1527 for (j = 0; j < CHARS_PER_UINT32; j++) {
1528 if (mask & (0x1 << j))
1529 reta_conf[idx].reta[shift + j] = ((reta >>
1530 CHAR_BIT * j) & UINT8_MAX);
1538 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1539 struct rte_eth_rss_conf *rss_conf)
1541 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1542 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1544 uint64_t hf = rss_conf->rss_hf;
1547 PMD_INIT_FUNC_TRACE();
1549 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1550 FM10K_RSSRK_ENTRIES_PER_REG)
1557 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
1558 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
1559 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
1560 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
1561 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
1562 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
1563 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
1564 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
1565 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
1567 /* If the mapping doesn't fit any supported, return */
1572 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1573 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1575 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1581 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1582 struct rte_eth_rss_conf *rss_conf)
1584 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1585 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1590 PMD_INIT_FUNC_TRACE();
1592 if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1593 FM10K_RSSRK_ENTRIES_PER_REG)
1597 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1598 key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1600 mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1602 hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
1603 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
1604 hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
1605 hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
1606 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
1607 hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
1608 hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
1609 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
1610 hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
1612 rss_conf->rss_hf = hf;
static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

	/* Bind all local non-queue interrupts to vector 0 */
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);

	/* Enable misc causes */
	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
				FM10K_EIMR_ENABLE(THI_FAULT) |
				FM10K_EIMR_ENABLE(FUM_FAULT) |
				FM10K_EIMR_ENABLE(MAILBOX) |
				FM10K_EIMR_ENABLE(SWITCHREADY) |
				FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
				FM10K_EIMR_ENABLE(SRAMERROR) |
				FM10K_EIMR_ENABLE(VFLR));

	/* Enable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	FM10K_WRITE_FLUSH(hw);
}
1650 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1652 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1653 uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1655 /* Bind all local non-queue interrupt to vector 0 */
1658 /* Only INT 0 available, other 15 are reserved. */
1659 FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1662 FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1663 FM10K_ITR_MASK_CLEAR);
1664 FM10K_WRITE_FLUSH(hw);
1668 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1670 struct fm10k_fault fault;
1672 const char *estr = "Unknown error";
1674 /* Process PCA fault */
1675 if (eicr & FM10K_EIMR_PCA_FAULT) {
1676 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1679 switch (fault.type) {
1681 estr = "PCA_NO_FAULT"; break;
1682 case PCA_UNMAPPED_ADDR:
1683 estr = "PCA_UNMAPPED_ADDR"; break;
1684 case PCA_BAD_QACCESS_PF:
1685 estr = "PCA_BAD_QACCESS_PF"; break;
1686 case PCA_BAD_QACCESS_VF:
1687 estr = "PCA_BAD_QACCESS_VF"; break;
1688 case PCA_MALICIOUS_REQ:
1689 estr = "PCA_MALICIOUS_REQ"; break;
1690 case PCA_POISONED_TLP:
1691 estr = "PCA_POISONED_TLP"; break;
1693 estr = "PCA_TLP_ABORT"; break;
1697 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1698 estr, fault.func ? "VF" : "PF", fault.func,
1699 fault.address, fault.specinfo);
1702 /* Process THI fault */
1703 if (eicr & FM10K_EIMR_THI_FAULT) {
1704 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
1707 switch (fault.type) {
1709 estr = "THI_NO_FAULT"; break;
1710 case THI_MAL_DIS_Q_FAULT:
1711 estr = "THI_MAL_DIS_Q_FAULT"; break;
1715 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1716 estr, fault.func ? "VF" : "PF", fault.func,
1717 fault.address, fault.specinfo);
1720 /* Process FUM fault */
1721 if (eicr & FM10K_EIMR_FUM_FAULT) {
1722 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
1725 switch (fault.type) {
1727 estr = "FUM_NO_FAULT"; break;
1728 case FUM_UNMAPPED_ADDR:
1729 estr = "FUM_UNMAPPED_ADDR"; break;
1730 case FUM_POISONED_TLP:
1731 estr = "FUM_POISONED_TLP"; break;
1732 case FUM_BAD_VF_QACCESS:
1733 estr = "FUM_BAD_VF_QACCESS"; break;
1734 case FUM_ADD_DECODE_ERR:
1735 estr = "FUM_ADD_DECODE_ERR"; break;
1737 estr = "FUM_RO_ERROR"; break;
1738 case FUM_QPRC_CRC_ERROR:
1739 estr = "FUM_QPRC_CRC_ERROR"; break;
1740 case FUM_CSR_TIMEOUT:
1741 estr = "FUM_CSR_TIMEOUT"; break;
1742 case FUM_INVALID_TYPE:
1743 estr = "FUM_INVALID_TYPE"; break;
1744 case FUM_INVALID_LENGTH:
1745 estr = "FUM_INVALID_LENGTH"; break;
1746 case FUM_INVALID_BE:
1747 estr = "FUM_INVALID_BE"; break;
1748 case FUM_INVALID_ALIGN:
1749 estr = "FUM_INVALID_ALIGN"; break;
1753 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1754 estr, fault.func ? "VF" : "PF", fault.func,
1755 fault.address, fault.specinfo);
1762 PMD_INIT_LOG(ERR, "Failed to handle fault event.");
/**
 * PF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_pf(
			__rte_unused struct rte_intr_handle *handle,
			void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t cause, status;

	if (hw->mac.type != fm10k_mac_pf)
		return;

	cause = FM10K_READ_REG(hw, FM10K_EICR);

	/* Handle PCI fault cases */
	if (cause & FM10K_EICR_FAULT_MASK) {
		PMD_INIT_LOG(ERR, "INT: fault detected");
		fm10k_dev_handle_fault(hw, cause);
	}

	/* Handle switch up/down */
	if (cause & FM10K_EICR_SWITCHNOTREADY)
		PMD_INIT_LOG(ERR, "INT: Switch is not ready");

	if (cause & FM10K_EICR_SWITCHREADY)
		PMD_INIT_LOG(INFO, "INT: Switch is ready");

	/* Handle mailbox message */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	/* Handle SRAM error */
	if (cause & FM10K_EICR_SRAMERROR) {
		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");

		status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
		/* Write to clear pending bits */
		FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);

		/* Todo: print out error message after shared code updates */
	}

	/* Clear any pending of these three events */
	cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
		FM10K_EICR_SWITCHREADY;
	FM10K_WRITE_REG(hw, FM10K_EICR, cause);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}
/**
 * VF interrupt handler triggered by NIC for handling specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_vf(
			__rte_unused struct rte_intr_handle *handle,
			void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != fm10k_mac_vf)
		return;

	/* Handle mailbox message if lock is acquired */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}
/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/* Mailbox message handler in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handler with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler. err: %d",
				err);
		return err;
	}

	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}

static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}
1916 static const struct eth_dev_ops fm10k_eth_dev_ops = {
1917 .dev_configure = fm10k_dev_configure,
1918 .dev_start = fm10k_dev_start,
1919 .dev_stop = fm10k_dev_stop,
1920 .dev_close = fm10k_dev_close,
1921 .promiscuous_enable = fm10k_dev_promiscuous_enable,
1922 .promiscuous_disable = fm10k_dev_promiscuous_disable,
1923 .allmulticast_enable = fm10k_dev_allmulticast_enable,
1924 .allmulticast_disable = fm10k_dev_allmulticast_disable,
1925 .stats_get = fm10k_stats_get,
1926 .stats_reset = fm10k_stats_reset,
1927 .link_update = fm10k_link_update,
1928 .dev_infos_get = fm10k_dev_infos_get,
1929 .vlan_filter_set = fm10k_vlan_filter_set,
1930 .mac_addr_add = fm10k_macaddr_add,
1931 .mac_addr_remove = fm10k_macaddr_remove,
1932 .rx_queue_start = fm10k_dev_rx_queue_start,
1933 .rx_queue_stop = fm10k_dev_rx_queue_stop,
1934 .tx_queue_start = fm10k_dev_tx_queue_start,
1935 .tx_queue_stop = fm10k_dev_tx_queue_stop,
1936 .rx_queue_setup = fm10k_rx_queue_setup,
1937 .rx_queue_release = fm10k_rx_queue_release,
1938 .tx_queue_setup = fm10k_tx_queue_setup,
1939 .tx_queue_release = fm10k_tx_queue_release,
1940 .reta_update = fm10k_reta_update,
1941 .reta_query = fm10k_reta_query,
1942 .rss_hash_update = fm10k_rss_hash_update,
1943 .rss_hash_conf_get = fm10k_rss_hash_conf_get,
1947 eth_fm10k_dev_init(struct rte_eth_dev *dev)
1949 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1951 struct fm10k_macvlan_filter_info *macvlan;
1953 PMD_INIT_FUNC_TRACE();
1955 dev->dev_ops = &fm10k_eth_dev_ops;
1956 dev->rx_pkt_burst = &fm10k_recv_pkts;
1957 dev->tx_pkt_burst = &fm10k_xmit_pkts;
1959 if (dev->data->scattered_rx)
1960 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
1962 /* only initialize in the primary process */
1963 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1966 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1967 memset(macvlan, 0, sizeof(*macvlan));
1968 /* Vendor and Device ID need to be set before init of shared code */
1969 memset(hw, 0, sizeof(*hw));
1970 hw->device_id = dev->pci_dev->id.device_id;
1971 hw->vendor_id = dev->pci_dev->id.vendor_id;
1972 hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
1973 hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
1974 hw->revision_id = 0;
1975 hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
1976 if (hw->hw_addr == NULL) {
1977 PMD_INIT_LOG(ERR, "Bad mem resource."
1978 " Try to blacklist unused devices.");
1982 /* Store fm10k_adapter pointer */
1983 hw->back = dev->data->dev_private;
1985 /* Initialize the shared code */
1986 diag = fm10k_init_shared_code(hw);
1987 if (diag != FM10K_SUCCESS) {
1988 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1993 * Inialize bus info. Normally we would call fm10k_get_bus_info(), but
1994 * there is no way to get link status without reading BAR4. Until this
1995 * works, assume we have maximum bandwidth.
1996 * @todo - fix bus info
1998 hw->bus_caps.speed = fm10k_bus_speed_8000;
1999 hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2000 hw->bus_caps.payload = fm10k_bus_payload_512;
2001 hw->bus.speed = fm10k_bus_speed_8000;
2002 hw->bus.width = fm10k_bus_width_pcie_x8;
2003 hw->bus.payload = fm10k_bus_payload_256;
2005 /* Initialize the hw */
2006 diag = fm10k_init_hw(hw);
2007 if (diag != FM10K_SUCCESS) {
2008 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2012 /* Initialize MAC address(es) */
2013 dev->data->mac_addrs = rte_zmalloc("fm10k",
2014 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2015 if (dev->data->mac_addrs == NULL) {
2016 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2020 diag = fm10k_read_mac_addr(hw);
2022 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2023 &dev->data->mac_addrs[0]);
2025 if (diag != FM10K_SUCCESS ||
2026 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2028 /* Generate a random addr */
2029 eth_random_addr(hw->mac.addr);
2030 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2031 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2032 &dev->data->mac_addrs[0]);
2035 /* Reset the hw statistics */
2036 fm10k_stats_reset(dev);
2039 diag = fm10k_reset_hw(hw);
2040 if (diag != FM10K_SUCCESS) {
2041 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2045 /* Setup mailbox service */
2046 diag = fm10k_setup_mbx_service(hw);
2047 if (diag != FM10K_SUCCESS) {
2048 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2052 /*PF/VF has different interrupt handling mechanism */
2053 if (hw->mac.type == fm10k_mac_pf) {
2054 /* register callback func to eal lib */
2055 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2056 fm10k_dev_interrupt_handler_pf, (void *)dev);
2058 /* enable MISC interrupt */
2059 fm10k_dev_enable_intr_pf(dev);
2061 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2062 fm10k_dev_interrupt_handler_vf, (void *)dev);
2064 fm10k_dev_enable_intr_vf(dev);
2067 /* Enable uio intr after callback registered */
2068 rte_intr_enable(&(dev->pci_dev->intr_handle));
2070 hw->mac.ops.update_int_moderator(hw);
2072 /* Make sure Switch Manager is ready before going forward. */
2073 if (hw->mac.type == fm10k_mac_pf) {
2074 int switch_ready = 0;
2077 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2079 hw->mac.ops.get_host_state(hw, &switch_ready);
2080 fm10k_mbx_unlock(hw);
2083 /* Delay some time to acquire async LPORT_MAP info. */
2084 rte_delay_us(WAIT_SWITCH_MSG_US);
2087 if (switch_ready == 0) {
2088 PMD_INIT_LOG(ERR, "switch is not ready");
2094 * Below function will trigger operations on mailbox, acquire lock to
2095 * avoid race condition from interrupt handler. Operations on mailbox
2096 * FIFO will trigger interrupt to PF/SM, in which interrupt handler
2097 * will handle and generate an interrupt to our side. Then, FIFO in
2098 * mailbox will be touched.
2101 /* Enable port first */
2102 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2105 * Add default mac. glort is assigned by SM for PF, while is
2106 * unused for VF. PF will assign correct glort for VF.
2108 hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
2111 /* Set unicast mode by default. App can change to other mode in other
2114 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2115 FM10K_XCAST_MODE_NONE);
2117 fm10k_mbx_unlock(hw);
/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.dev_private_size = sizeof(struct fm10k_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}
static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);