/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"
#define FM10K_RX_BUFF_ALIGN 512
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20

/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
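
/*
 * CHARS_PER_UINT32 evaluates to 4 and BIT_MASK_PER_UINT32 to 0xF: the RSS
 * redirection table below is walked in chunks of four one-byte entries,
 * one uint32 register at a time, using this 4-bit mask.
 */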
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
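
/*
 * The lock/unlock pair above brackets every mailbox operation in this file;
 * a minimal sketch of the call pattern (see eth_fm10k_dev_init() below for
 * the real call sites):
 *
 *	fm10k_mbx_lock(hw);
 *	hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);
 *	fm10k_mbx_unlock(hw);
 */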
/*
 * Clean a queue: zero the descriptor ring and free the software buffers.
 * Used when stopping the device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}
/*
 * Free all queue memory; used when releasing the queue (i.e. configure).
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		rx_queue_clean(q);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}
/*
 * Clean a queue: zero the descriptor ring and free the software buffers.
 * Used when stopping the device.
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}
/*
 * Free all queue memory; used when releasing the queue (i.e. configure).
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
		tx_queue_clean(q);
		if (q->rs_tracker.list)
			rte_free(q->rs_tracker.list);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

	return 0;
}
static int
fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	/* The host-interface link is always up. The speed is ~50Gbps per Gen3
	 * x8 PCIe interface. For now, we leave the speed undefined since there
	 * is no 50Gbps Ethernet.
	 */
	dev->data->dev_link.link_speed = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status = 1;

	return 0;
}
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < FM10K_MAX_QUEUES_PF); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes += stats->q_ibytes[i];
		obytes += stats->q_obytes[i];
	}
	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;
}
static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);
}
static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
	dev_info->max_rx_queues = hw->mac.max_queues;
	dev_info->max_tx_queues = hw->mac.max_queues;
	dev_info->max_mac_addrs = 1;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = FM10K_MAX_VF_NUM;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa = 0;
	dev_info->reta_size = FM10K_MAX_RSS_INDICES;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;
	else
		return 0;
}
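
/*
 * For example, with hypothetical bounds min = 128, max = 4096 and mult = 32
 * (the real values come from the FM10K_{MIN,MAX,MULT}_*_DESC macros), a
 * request of 512 passes, 500 fails the multiple check, and 100 fails the
 * range check.
 */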
/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
	uint8_t port_id, uint16_t queue_id, int socket_id,
	uint32_t size, uint32_t align)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
		 driver_name, ring_name, port_id, queue_id, socket_id);

	/* return the memzone if it already exists */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}
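
/*
 * The name format above yields, e.g., "rte_pmd_fm10k_rx_ring_0_0_0" for Rx
 * queue 0 of port 0 on socket 0, so repeated setup calls for the same queue
 * find and reuse the existing zone via rte_memzone_lookup().
 */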
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request < min) || (request > max) || ((div % request) != 0))
		return -1;
	else
		return 0;
}
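
/*
 * Note the divisor test is (div % request), not (request % div): the
 * requested threshold must divide evenly into the ring-derived value,
 * matching the "a divisor of %u" wording in the error logs below.
 */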
static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

	return 0;
}
/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B. The mempool element size is checked here
 * to make sure a maximally sized Ethernet frame can still be wholly
 * contained within the buffer after 512B alignment.
 *
 * Return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
	uint32_t min_size;

	/* elt_size includes mbuf header and headroom */
	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
			RTE_PKTMBUF_HEADROOM;

	/* account for up to 512B of alignment */
	min_size -= FM10K_RX_BUFF_ALIGN;

	/* sanity check: unsigned subtraction above may have wrapped around */
	if (min_size > mp->elt_size)
		return 0;

	if (min_size < ETHER_MAX_VLAN_FRAME_LEN)
		return 0;

	/* size is valid */
	return 1;
}
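
/*
 * Worked example (illustrative sizes): with a common 2048B data room, i.e.
 * elt_size minus the rte_mbuf header and RTE_PKTMBUF_HEADROOM, the 512B
 * alignment slack leaves 1536B, which still holds a maximal
 * ETHER_MAX_VLAN_FRAME_LEN (1522B) frame, so the mempool is accepted.
 */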
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure the mempool element size can account for alignment. */
	if (!mempool_element_size_valid(mp)) {
		PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
		return (-EINVAL);
	}

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
				FM10K_MULT_RX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
			FM10K_MULT_RX_DESC);
		return (-EINVAL);
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->rx_queues[queue_id] != NULL) {
		rx_queue_free(dev->data->rx_queues[queue_id]);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return (-ENOMEM);
	}

	/* setup queue */
	q->mp = mp;
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
	if (handle_rxconf(q, conf))
		return (-EINVAL);

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return (-ENOMEM);
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	dev->data->rx_queues[queue_id] = q;
	return 0;
}
static void
fm10k_rx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	rx_queue_free(queue);
}
static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
	uint16_t tx_free_thresh;
	uint16_t tx_rs_thresh;

	/* constraint macros require that tx_free_thresh is configured
	 * before tx_rs_thresh */
	if (conf->tx_free_thresh == 0)
		tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
	else
		tx_free_thresh = conf->tx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_DIV(q),
			tx_free_thresh)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->free_thresh = tx_free_thresh;

	if (conf->tx_rs_thresh == 0)
		tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
	else
		tx_rs_thresh = conf->tx_rs_thresh;

	q->tx_deferred_start = conf->tx_deferred_start;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_DIV(q),
			tx_rs_thresh)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->rs_thresh = tx_rs_thresh;

	return 0;
}
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_txconf *conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
				FM10K_MULT_TX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
			FM10K_MULT_TX_DESC);
		return (-EINVAL);
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->tx_queues[queue_id] != NULL) {
		tx_queue_free(dev->data->tx_queues[queue_id]);
		dev->data->tx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return (-ENOMEM);
	}

	/* setup queue */
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
	if (handle_txconf(q, conf))
		return (-EINVAL);

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return (-ENOMEM);
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	/*
	 * allocate memory for the RS bit tracker. Enough slots to hold the
	 * descriptor index for each RS bit needing to be set are required.
	 */
	q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
				((nb_desc + 1) / q->rs_thresh) *
				sizeof(uint16_t),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (q->rs_tracker.list == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}

	dev->data->tx_queues[queue_id] = q;
	return 0;
}
static void
fm10k_tx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	tx_queue_free(queue);
}
static int
fm10k_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size > FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
			"table (%d) doesn't match the number supported by "
			"hardware (%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table RETA[n], n=0..31. The redirection table
	 * has 128 entries in 32 registers.
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		/* a partial update must read the old register value first */
		reta = 0;
		if (mask != BIT_MASK_PER_UINT32)
			reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));

		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j)) {
				if (mask != 0xF)
					reta &= ~(UINT8_MAX << CHAR_BIT * j);
				reta |= reta_conf[idx].reta[shift + j] <<
						(CHAR_BIT * j);
			}
		}
		FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
	}

	return 0;
}
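
/*
 * Indexing sketch: with RTE_RETA_GROUP_SIZE = 64, entries i = 36..39 live in
 * reta_conf[0] at mask bits 36..39 and land in hardware register
 * FM10K_RETA(0, 9) (i >> 2 == 9), one byte per entry (the CHAR_BIT * j
 * shifts above).
 */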
static int
fm10k_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size < FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
			"table (%d) doesn't match the number supported by "
			"hardware (%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Read Redirection Table RETA[n], n=0..31. The redirection table
	 * has 128 entries in 32 registers.
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] = ((reta >>
					CHAR_BIT * j) & UINT8_MAX);
		}
	}

	return 0;
}
/* Mailbox message handlers for VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/* Mailbox message handlers for PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace default message handlers with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
				err);
		return err;
	}

	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}
static struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure		= fm10k_dev_configure,
	.stats_get		= fm10k_stats_get,
	.stats_reset		= fm10k_stats_reset,
	.link_update		= fm10k_link_update,
	.dev_infos_get		= fm10k_dev_infos_get,
	.rx_queue_setup		= fm10k_rx_queue_setup,
	.rx_queue_release	= fm10k_rx_queue_release,
	.tx_queue_setup		= fm10k_tx_queue_setup,
	.tx_queue_release	= fm10k_tx_queue_release,
	.reta_update		= fm10k_reta_update,
	.reta_query		= fm10k_reta_query,
};
static int
eth_fm10k_dev_init(__rte_unused struct eth_driver *eth_drv,
	struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/*
	 * Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);
	if (diag != FM10K_SUCCESS) {
		/*
		 * TODO: remove special handling on VF. Need shared code to
		 * fix first.
		 */
		if (hw->mac.type == fm10k_mac_pf) {
			PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
			return -EIO;
		} else {
			/* Generate a random addr */
			eth_random_addr(hw->mac.addr);
			memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		}
	}

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/*
	 * The calls below trigger mailbox operations, so take the lock to
	 * avoid racing with the interrupt handler: pushing to the mailbox
	 * FIFO raises an interrupt to the PF/SM, whose handler responds with
	 * an interrupt back to our side, which in turn touches the mailbox
	 * FIFO again.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, 0, 0, 1);

	/* Update default vlan */
	hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);

	/*
	 * Add default mac/vlan filter. glort is assigned by SM for PF, while
	 * it is unused for VF. PF will assign the correct glort for VF.
	 */
	hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
			hw->mac.default_vid, 1, 0);

	/* Default to multicast xcast mode. App can change to another mode
	 * via the API later.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
			FM10K_XCAST_MODE_MULTI);

	fm10k_mbx_unlock(hw);

	return 0;
}
/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_driver rte_pmd_fm10k = {
	.pci_drv = {
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.dev_private_size = sizeof(struct fm10k_adapter),
};
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}
static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);