1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
38 * fm10k_reset_hw_pf - PF hardware reset
39 * @hw: pointer to hardware structure
41 * This function should return the hardware to a state similar to the
42 * one it is in after being powered on.
44 STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
50 DEBUGFUNC("fm10k_reset_hw_pf");
52 /* Disable interrupts */
53 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
55 /* Lock ITR2 reg 0 into itself and disable interrupt moderation */
56 FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
57 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
59 /* We assume here Tx and Rx queue 0 are owned by the PF */
61 /* Shut off VF access to their queues forcing them to queue 0 */
62 for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
63 FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
64 FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
67 /* shut down all rings */
68 err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
69 if (err == FM10K_ERR_REQUESTS_PENDING) {
70 hw->mac.reset_while_pending++;
76 /* Verify that DMA is no longer active */
77 reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL);
78 if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
79 return FM10K_ERR_DMA_PENDING;
82 /* Initiate data path reset */
83 reg = FM10K_DMA_CTRL_DATAPATH_RESET;
84 FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg);
86 /* Flush write and allow 100us for reset to complete */
87 FM10K_WRITE_FLUSH(hw);
88 usec_delay(FM10K_RESET_TIMEOUT);
90 /* Verify we made it out of reset */
91 reg = FM10K_READ_REG(hw, FM10K_IP);
92 if (!(reg & FM10K_IP_NOTINRESET))
93 return FM10K_ERR_RESET_FAILED;
99 * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
100 * @hw: pointer to hardware structure
102 * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
104 STATIC bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
106 u16 sriov_ctrl = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_SRIOV_CTRL);
108 DEBUGFUNC("fm10k_is_ari_hierarchy_pf");
110 return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
114 * fm10k_init_hw_pf - PF hardware initialization
115 * @hw: pointer to hardware structure
118 STATIC s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
120 u32 dma_ctrl, txqctl;
123 DEBUGFUNC("fm10k_init_hw_pf");
125 /* Establish default VSI as valid */
126 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
127 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
128 FM10K_DGLORTMAP_ANY);
130 /* Invalidate all other GLORT entries */
131 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
132 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
134 /* reset ITR2(0) to point to itself */
135 FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
137 /* reset VF ITR2(0) to point to 0 to avoid PF registers */
138 FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
140 /* loop through all PF ITR2 registers pointing them to the previous */
141 for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
142 FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
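/* Descriptive note: taken together, the writes above chain the ITR2
 * registers into the interrupt moderator's linked list, with each PF entry i
 * pointing at entry i - 1 and entry 0 pointing at itself, e.g. 3 -> 2 -> 1 -> 0 -> 0.
 */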
144 /* Enable interrupt moderator if not already enabled */
145 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
147 /* compute the default txqctl configuration */
148 txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
149 (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
151 for (i = 0; i < FM10K_MAX_QUEUES; i++) {
152 /* configure rings for 256 Queue / 32 Descriptor cache mode */
153 FM10K_WRITE_REG(hw, FM10K_TQDLOC(i),
154 (i * FM10K_TQDLOC_BASE_32_DESC) |
155 FM10K_TQDLOC_SIZE_32_DESC);
156 FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
158 /* configure rings to provide TPH processing hints */
159 FM10K_WRITE_REG(hw, FM10K_TPH_TXCTRL(i),
160 FM10K_TPH_TXCTRL_DESC_TPHEN |
161 FM10K_TPH_TXCTRL_DESC_RROEN |
162 FM10K_TPH_TXCTRL_DESC_WROEN |
163 FM10K_TPH_TXCTRL_DATA_RROEN);
164 FM10K_WRITE_REG(hw, FM10K_TPH_RXCTRL(i),
165 FM10K_TPH_RXCTRL_DESC_TPHEN |
166 FM10K_TPH_RXCTRL_DESC_RROEN |
167 FM10K_TPH_RXCTRL_DATA_WROEN |
168 FM10K_TPH_RXCTRL_HDR_WROEN);
171 /* set max hold interval to align with 1.024 usec in all modes and store ITR scale */
174 switch (hw->bus.speed) {
175 case fm10k_bus_speed_2500:
176 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
177 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
179 case fm10k_bus_speed_5000:
180 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
181 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
183 case fm10k_bus_speed_8000:
184 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
185 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
189 /* just in case, assume Gen3 ITR scale */
190 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
194 /* Configure TSO flags */
195 FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
196 FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
199 * Set Rx Descriptor size to 32
200 * Set Minimum MSS to 64
201 * Set Maximum number of Rx queues to 256 / 32 Descriptor
203 dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
204 FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
205 FM10K_DMA_CTRL_32_DESC;
207 FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, dma_ctrl);
209 /* record maximum queue count, we limit ourselves to 128 */
210 hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
212 /* We support either 64 VFs or 7 VFs depending on whether we have ARI */
213 hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
215 return FM10K_SUCCESS;
218 #ifndef NO_IS_SLOT_APPROPRIATE_CHECK
220 * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
221 * @hw: pointer to hardware structure
223 * Looks at the PCIe bus info to confirm whether or not this slot can support
224 * the necessary bandwidth for this device.
226 STATIC bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
228 DEBUGFUNC("fm10k_is_slot_appropriate_pf");
230 return (hw->bus.speed == hw->bus_caps.speed) &&
231 (hw->bus.width == hw->bus_caps.width);
236 * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
237 * @hw: pointer to hardware structure
238 * @vid: VLAN ID to add to table
239 * @vsi: Index indicating VF ID or PF ID in table
240 * @set: Indicates if this is a set or clear operation
242 * This function adds or removes the corresponding VLAN ID from the VLAN
243 * filter table for the corresponding function. In addition to the
244 * standard set/clear that supports one bit, a multi-bit write is
245 * supported to set 64 bits at a time.
247 STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
249 u32 vlan_table, reg, mask, bit, len;
251 /* verify the VSI index is valid */
252 if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
253 return FM10K_ERR_PARAM;
255 /* VLAN multi-bit write:
256 * The multi-bit write has several parts to it.
258 * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
259 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
260 * | RSVD0 | Length |C|RSVD0| VLAN ID |
261 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
263 * VLAN ID: VLAN starting value
264 * RSVD0: Reserved section, must be 0
265 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
266 * Length: Number of times to repeat the bit being set
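 *
 * Illustrative example (hypothetical values): a request to set VLAN IDs
 * 256 through 319 in one shot encodes VLAN ID = 256 and Length = 63 (the
 * starting bit plus 63 repeats), i.e. vid = (63 << 16) | 256, with the C
 * bit left 0 for a set operation.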
269 vid = (vid << 17) >> 17;
271 /* verify the reserved 0 fields are 0 */
272 if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
273 return FM10K_ERR_PARAM;
275 /* Loop through the table updating all required VLANs */
276 for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
277 len < FM10K_VLAN_TABLE_VID_MAX;
278 len -= 32 - bit, reg++, bit = 0) {
279 /* record the initial state of the register */
280 vlan_table = FM10K_READ_REG(hw, reg);
282 /* truncate mask if we are at the start or end of the run */
283 mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
285 /* make necessary modifications to the register */
286 mask &= set ? ~vlan_table : vlan_table;
288 FM10K_WRITE_REG(hw, reg, vlan_table ^ mask);
291 return FM10K_SUCCESS;
295 * fm10k_read_mac_addr_pf - Read device MAC address
296 * @hw: pointer to the HW structure
298 * Reads the device MAC address from the SM_AREA and stores the value.
300 STATIC s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
302 u8 perm_addr[ETH_ALEN];
305 DEBUGFUNC("fm10k_read_mac_addr_pf");
307 serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(1));
309 /* last byte should be all 1's */
310 if ((~serial_num) << 24)
311 return FM10K_ERR_INVALID_MAC_ADDR;
313 perm_addr[0] = (u8)(serial_num >> 24);
314 perm_addr[1] = (u8)(serial_num >> 16);
315 perm_addr[2] = (u8)(serial_num >> 8);
317 serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(0));
319 /* first byte should be all 1's */
320 if ((~serial_num) >> 24)
321 return FM10K_ERR_INVALID_MAC_ADDR;
323 perm_addr[3] = (u8)(serial_num >> 16);
324 perm_addr[4] = (u8)(serial_num >> 8);
325 perm_addr[5] = (u8)(serial_num);
327 memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
328 memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
330 return FM10K_SUCCESS;
334 * fm10k_glort_valid_pf - Validate that the provided glort is valid
335 * @hw: pointer to the HW structure
336 * @glort: base glort to be validated
338 * This function will return an error if the provided glort is invalid
340 bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
342 glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
344 return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
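/* Illustrative note (hypothetical values): hw->mac.dglort_map carries the
 * glort mask in its upper 16 bits and the glort base in its lower 16 bits.
 * With a map of 0xFF004000 (mask 0xFF00, base 0x4000) the check above
 * accepts glorts 0x4000 through 0x40FF and rejects everything else.
 */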
348 * fm10k_update_xc_addr_pf - Update device addresses
349 * @hw: pointer to the HW structure
350 * @glort: base resource tag for this request
351 * @mac: MAC address to add/remove from table
352 * @vid: VLAN ID to add/remove from table
353 * @add: Indicates if this is an add or remove operation
354 * @flags: flags field to indicate add and secure
356 * This function generates a message to the Switch API requesting
357 * that the given logical port add/remove the given L2 MAC/VLAN address.
359 STATIC s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
360 const u8 *mac, u16 vid, bool add, u8 flags)
362 struct fm10k_mbx_info *mbx = &hw->mbx;
363 struct fm10k_mac_update mac_update;
366 DEBUGFUNC("fm10k_update_xc_addr_pf");
368 /* clear set bit from VLAN ID */
369 vid &= ~FM10K_VLAN_CLEAR;
371 /* if glort or VLAN are not valid return error */
372 if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
373 return FM10K_ERR_PARAM;
376 mac_update.mac_lower = FM10K_CPU_TO_LE32(((u32)mac[2] << 24) |
377 ((u32)mac[3] << 16) |
380 mac_update.mac_upper = FM10K_CPU_TO_LE16(((u16)mac[0] << 8) |
382 mac_update.vlan = FM10K_CPU_TO_LE16(vid);
383 mac_update.glort = FM10K_CPU_TO_LE16(glort);
384 mac_update.action = add ? 0 : 1;
385 mac_update.flags = flags;
387 /* generate message requesting MAC update */
388 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
389 fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
390 &mac_update, sizeof(mac_update));
392 /* load onto outgoing mailbox */
393 return mbx->ops.enqueue_tx(hw, mbx, msg);
397 * fm10k_update_uc_addr_pf - Update device unicast addresses
398 * @hw: pointer to the HW structure
399 * @glort: base resource tag for this request
400 * @mac: MAC address to add/remove from table
401 * @vid: VLAN ID to add/remove from table
402 * @add: Indicates if this is an add or remove operation
403 * @flags: flags field to indicate add and secure
405 * This function is used to add or remove unicast addresses for the PF.
408 STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
409 const u8 *mac, u16 vid, bool add, u8 flags)
411 DEBUGFUNC("fm10k_update_uc_addr_pf");
413 /* verify MAC address is valid */
414 if (!IS_VALID_ETHER_ADDR(mac))
415 return FM10K_ERR_PARAM;
417 return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
421 * fm10k_update_mc_addr_pf - Update device multicast addresses
422 * @hw: pointer to the HW structure
423 * @glort: base resource tag for this request
424 * @mac: MAC address to add/remove from table
425 * @vid: VLAN ID to add/remove from table
426 * @add: Indicates if this is an add or remove operation
428 * This function is used to add or remove multicast MAC addresses for the PF.
431 STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
432 const u8 *mac, u16 vid, bool add)
434 DEBUGFUNC("fm10k_update_mc_addr_pf");
436 /* verify multicast address is valid */
437 if (!IS_MULTICAST_ETHER_ADDR(mac))
438 return FM10K_ERR_PARAM;
440 return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
444 * fm10k_update_xcast_mode_pf - Request update of multicast mode
445 * @hw: pointer to hardware structure
446 * @glort: base resource tag for this request
447 * @mode: integer value indicating mode being requested
449 * This function will attempt to request a higher mode for the port
450 * so that it can enable either multicast, multicast promiscuous, or
451 * promiscuous mode of operation.
453 STATIC s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
455 struct fm10k_mbx_info *mbx = &hw->mbx;
456 u32 msg[3], xcast_mode;
458 DEBUGFUNC("fm10k_update_xcast_mode_pf");
460 if (mode > FM10K_XCAST_MODE_NONE)
461 return FM10K_ERR_PARAM;
463 /* if glort is not valid return error */
464 if (!fm10k_glort_valid_pf(hw, glort))
465 return FM10K_ERR_PARAM;
467 /* write xcast mode as a single u32 value,
468 * lower 16 bits: glort
469 * upper 16 bits: mode
471 xcast_mode = ((u32)mode << 16) | glort;
473 /* generate message requesting to change xcast mode */
474 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
475 fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
477 /* load onto outgoing mailbox */
478 return mbx->ops.enqueue_tx(hw, mbx, msg);
482 * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
483 * @hw: pointer to hardware structure
485 * This function walks through the MSI-X vector table to determine the
486 * number of active interrupts and based on that information updates the
487 * interrupt moderator linked list.
489 STATIC void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
493 /* Disable interrupt moderator */
494 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
496 /* loop through PF vectors from last to first looking for enabled vectors */
497 for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
498 if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
502 /* always reset VFITR2[0] to point to last enabled PF vector */
503 FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
505 /* reset ITR2[0] to point to last enabled PF vector */
506 if (!hw->iov.num_vfs)
507 FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
509 /* Enable interrupt moderator */
510 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
514 * fm10k_update_lport_state_pf - Notify the switch of a change in port state
515 * @hw: pointer to the HW structure
516 * @glort: base resource tag for this request
517 * @count: number of logical ports being updated
518 * @enable: boolean value indicating enable or disable
520 * This function is used to add/remove a logical port from the switch.
522 STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
523 u16 count, bool enable)
525 struct fm10k_mbx_info *mbx = &hw->mbx;
526 u32 msg[3], lport_msg;
528 DEBUGFUNC("fm10k_lport_state_pf");
530 /* do nothing if we are being asked to create or destroy 0 ports */
532 return FM10K_SUCCESS;
534 /* if glort is not valid return error */
535 if (!fm10k_glort_valid_pf(hw, glort))
536 return FM10K_ERR_PARAM;
538 /* reset multicast mode if deleting lport */
540 fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
542 /* construct the lport message from the 2 pieces of data we have */
543 lport_msg = ((u32)count << 16) | glort;
545 /* generate lport create/delete message */
546 fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
547 FM10K_PF_MSG_ID_LPORT_DELETE);
548 fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
550 /* load onto outgoing mailbox */
551 return mbx->ops.enqueue_tx(hw, mbx, msg);
555 * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
556 * @hw: pointer to hardware structure
557 * @dglort: pointer to dglort configuration structure
559 * Reads the configuration structure contained in dglort_cfg and uses
560 * that information to populate a DGLORTMAP/DEC entry and the queues
561 * to which it has been assigned.
563 STATIC s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
564 struct fm10k_dglort_cfg *dglort)
566 u16 glort, queue_count, vsi_count, pc_count;
567 u16 vsi, queue, pc, q_idx;
568 u32 txqctl, dglortdec, dglortmap;
570 /* verify the dglort pointer */
572 return FM10K_ERR_PARAM;
574 /* verify the dglort values */
575 if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
576 (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
577 (dglort->queue_l > 8) || (dglort->queue_b >= 256))
578 return FM10K_ERR_PARAM;
580 /* determine count of VSIs and queues */
581 queue_count = BIT(dglort->rss_l + dglort->pc_l);
582 vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
583 glort = dglort->glort;
584 q_idx = dglort->queue_b;
586 /* configure SGLORT for queues */
587 for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
588 for (queue = 0; queue < queue_count; queue++, q_idx++) {
589 if (q_idx >= FM10K_MAX_QUEUES)
592 FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(q_idx), glort);
593 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(q_idx), glort);
597 /* determine count of PCs and queues */
598 queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
599 pc_count = BIT(dglort->pc_l);
601 /* configure PC for Tx queues */
602 for (pc = 0; pc < pc_count; pc++) {
603 q_idx = pc + dglort->queue_b;
604 for (queue = 0; queue < queue_count; queue++) {
605 if (q_idx >= FM10K_MAX_QUEUES)
608 txqctl = FM10K_READ_REG(hw, FM10K_TXQCTL(q_idx));
609 txqctl &= ~FM10K_TXQCTL_PC_MASK;
610 txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
611 FM10K_WRITE_REG(hw, FM10K_TXQCTL(q_idx), txqctl);
617 /* configure DGLORTDEC */
618 dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
619 ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
620 ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
621 ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
622 ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
623 ((u32)(dglort->queue_l));
624 if (dglort->inner_rss)
625 dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;
627 /* configure DGLORTMAP */
628 dglortmap = (dglort->idx == fm10k_dglort_default) ?
629 FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
630 dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
631 dglortmap |= dglort->glort;
633 /* write values to hardware */
634 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
635 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
637 return FM10K_SUCCESS;
640 u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
642 u16 num_pools = hw->iov.num_pools;
644 return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
645 8 : FM10K_MAX_QUEUES_POOL;
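/* Worked example (assuming FM10K_MAX_QUEUES_POOL is 16): 8 or fewer pools
 * get 16 queues each, 9-16 pools get 8, 17-32 pools get 4, and more than
 * 32 pools are limited to 2 queues per pool.
 */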
648 u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
650 u16 num_vfs = hw->iov.num_vfs;
651 u16 vf_q_idx = FM10K_MAX_QUEUES;
653 vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
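/* Worked example (assuming FM10K_MAX_QUEUES is 256 and 16 queues per pool):
 * with 8 VFs, VF 0 starts at queue 256 - 16 * 8 = 128, VF 1 at 144, and so
 * on, leaving queues 0-127 to the PF.
 */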
658 STATIC u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
660 u16 num_pools = hw->iov.num_pools;
662 return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
663 FM10K_MAX_VECTORS_POOL;
666 STATIC u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
668 u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
670 vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
676 * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
677 * @hw: pointer to the HW structure
678 * @num_vfs: number of VFs to be allocated
679 * @num_pools: number of virtualization pools to be allocated
681 * Allocates queues and traffic classes to virtualization entities to prepare
682 * the PF for SR-IOV and VMDq
684 STATIC s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
687 u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
688 u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
691 /* hardware only supports up to 64 pools */
693 return FM10K_ERR_PARAM;
695 /* the number of VFs cannot exceed the number of pools */
696 if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
697 return FM10K_ERR_PARAM;
699 /* record number of virtualization entities */
700 hw->iov.num_vfs = num_vfs;
701 hw->iov.num_pools = num_pools;
703 /* determine qmap offsets and counts */
704 qmap_stride = (num_vfs > 8) ? 32 : 256;
705 qpp = fm10k_queues_per_pool(hw);
706 vpp = fm10k_vectors_per_pool(hw);
708 /* calculate starting index for queues */
709 vf_q_idx = fm10k_vf_queue_index(hw, 0);
712 /* establish TCs with -1 credits and no quanta to prevent transmit */
713 for (i = 0; i < num_vfs; i++) {
714 FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(i), 0);
715 FM10K_WRITE_REG(hw, FM10K_TC_RATE(i), 0);
716 FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(i),
717 FM10K_TC_CREDIT_CREDIT_MASK);
720 /* zero out all mbmem registers */
721 for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
722 FM10K_WRITE_REG(hw, FM10K_MBMEM(i), 0);
724 /* clear event notification of VF FLR */
725 FM10K_WRITE_REG(hw, FM10K_PFVFLREC(0), ~0);
726 FM10K_WRITE_REG(hw, FM10K_PFVFLREC(1), ~0);
728 /* loop through unallocated rings assigning them back to PF */
729 for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
730 FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
731 FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
732 FM10K_TXQCTL_UNLIMITED_BW | vid);
733 FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
736 /* PF should have already updated VFITR2[0] */
738 /* update all ITR registers to flow to VFITR2[0] */
739 for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
740 if (!(i & (vpp - 1)))
741 FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - vpp);
743 FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
746 /* update PF ITR2[0] to reference the last vector */
747 FM10K_WRITE_REG(hw, FM10K_ITR2(0),
748 fm10k_vf_vector_index(hw, num_vfs - 1));
750 /* loop through rings populating rings and TCs */
751 for (i = 0; i < num_vfs; i++) {
752 /* record index for VF queue 0 for use in end of loop */
753 vf_q_idx0 = vf_q_idx;
755 for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
756 /* assign VF and locked TC to queues */
757 FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
758 FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx),
759 (i << FM10K_TXQCTL_TC_SHIFT) | i |
760 FM10K_TXQCTL_VF | vid);
761 FM10K_WRITE_REG(hw, FM10K_RXDCTL(vf_q_idx),
762 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
763 FM10K_RXDCTL_DROP_ON_EMPTY);
764 FM10K_WRITE_REG(hw, FM10K_RXQCTL(vf_q_idx),
765 (i << FM10K_RXQCTL_VF_SHIFT) |
768 /* map queue pair to VF */
769 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
770 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
773 /* repeat the first ring for all of the remaining VF rings */
774 for (; j < qmap_stride; j++, qmap_idx++) {
775 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
776 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
780 /* loop through remaining indexes assigning all to queue 0 */
781 while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
782 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
783 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), 0);
787 return FM10K_SUCCESS;
791 * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
792 * @hw: pointer to the HW structure
793 * @vf_idx: index of VF receiving GLORT
794 * @rate: Rate indicated in Mb/s
796 * Configures the TC for a given VF to allow only up to a given number
797 * of Mb/s of outgoing Tx throughput.
799 STATIC s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
801 /* configure defaults */
802 u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
803 u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
805 /* verify vf is in range */
806 if (vf_idx >= hw->iov.num_vfs)
807 return FM10K_ERR_PARAM;
809 /* set interval to align with 4.096 usec in all modes */
810 switch (hw->bus.speed) {
811 case fm10k_bus_speed_2500:
812 interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
814 case fm10k_bus_speed_5000:
815 interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
822 if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
823 return FM10K_ERR_PARAM;
825 /* The quanta is measured in Bytes per 4.096 or 8.192 usec
826 * The rate is provided in Mbits per second
827 * To translate from rate to quanta we need to multiply the
828 * rate by 8.192 usec and divide by 8 bits/byte. To avoid
829 * dealing with floating point we can round the values up
830 * to the nearest whole number ratio which gives us 128 / 125.
832 tc_rate = (rate * 128) / 125;
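/* Worked example: a 10000 Mb/s request over an 8.192 usec window allows
 * 10000 * 1.024 = 10240 bytes per interval, which is exactly
 * (10000 * 128) / 125; a 2500 Mb/s request yields 2560 bytes.
 */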
834 /* try to keep the rate limiting accurate by increasing
835 * the number of credits and interval for rates less than 4Gb/s
843 /* update rate limiter with new values */
844 FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
845 FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
846 FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
848 return FM10K_SUCCESS;
852 * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
853 * @hw: pointer to the HW structure
854 * @vf_idx: index of VF receiving GLORT
856 * Update the interrupt moderator linked list to include any MSI-X
857 * interrupts which the VF has enabled in the MSI-X vector table.
859 STATIC s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
861 u16 vf_v_idx, vf_v_limit, i;
863 /* verify vf is in range */
864 if (vf_idx >= hw->iov.num_vfs)
865 return FM10K_ERR_PARAM;
867 /* determine vector offset and count */
868 vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
869 vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
871 /* search for first vector that is not masked */
872 for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
873 if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
877 /* reset linked list so it now includes our active vectors */
878 if (vf_idx == (hw->iov.num_vfs - 1))
879 FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
881 FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), i);
883 return FM10K_SUCCESS;
887 * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
888 * @hw: pointer to the HW structure
889 * @vf_info: pointer to VF information structure
891 * Assign a MAC address and default VLAN to a VF and notify it of the update
893 STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
894 struct fm10k_vf_info *vf_info)
896 u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
897 u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
898 s32 err = FM10K_SUCCESS;
901 /* verify vf is in range */
902 if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
903 return FM10K_ERR_PARAM;
905 /* determine qmap offsets and counts */
906 qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
907 queues_per_pool = fm10k_queues_per_pool(hw);
909 /* calculate starting index for queues */
910 vf_idx = vf_info->vf_idx;
911 vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
912 qmap_idx = qmap_stride * vf_idx;
914 /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
915 * used here to indicate to the VF that it will not have privilege to
916 * write VLAN_TABLE. All policy is enforced on the PF but this allows
917 * the VF to correctly report errors to userspace requests.
920 vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
922 vf_vid = vf_info->sw_vid;
924 /* generate MAC_ADDR request */
925 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
926 fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
927 vf_info->mac, vf_vid);
929 /* Configure Queue control register with new VLAN ID. The TXQCTL
930 * register is RO from the VF, so the PF must do this even in the
931 * case of notifying the VF of a new VID via the mailbox.
933 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
934 FM10K_TXQCTL_VID_MASK;
935 txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
936 FM10K_TXQCTL_VF | vf_idx;
938 for (i = 0; i < queues_per_pool; i++)
939 FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
941 /* try loading a message onto outgoing mailbox first */
942 if (vf_info->mbx.ops.enqueue_tx) {
943 err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
944 if (err != FM10K_MBX_ERR_NO_MBX)
949 /* If we aren't connected to a mailbox, this is most likely because
950 * the VF driver is not running. It should thus be safe to re-map
951 * queues and use the registers to pass the MAC address so that the VF
952 * driver gets correct information during its initialization.
955 /* MAP Tx queue back to 0 temporarily, and disable it */
956 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
957 FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
959 /* verify the ring has been disabled before modifying base address registers */
960 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
961 for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
962 /* limit ourselves to a 1ms timeout */
964 err = FM10K_ERR_DMA_PENDING;
969 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
972 /* Update base address registers to contain MAC address */
973 if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
974 tdbal = (((u32)vf_info->mac[3]) << 24) |
975 (((u32)vf_info->mac[4]) << 16) |
976 (((u32)vf_info->mac[5]) << 8);
978 tdbah = (((u32)0xFF) << 24) |
979 (((u32)vf_info->mac[0]) << 16) |
980 (((u32)vf_info->mac[1]) << 8) |
981 ((u32)vf_info->mac[2]);
984 /* Record the base address into queue 0 */
985 FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx), tdbal);
986 FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx), tdbah);
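/* For illustration (hypothetical address): a VF MAC of 02:aa:bb:cc:dd:ee is
 * handed off as TDBAL = 0xccddee00 and TDBAH = 0xff02aabb; the constant 0xff
 * in the top byte appears to let the VF distinguish a stored MAC from an
 * untouched register.
 */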
988 /* Provide the VF the ITR scale, using software-defined fields in TDLEN
989 * to pass the information during VF initialization. See definition of
990 * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
992 FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
993 FM10K_TDLEN_ITR_SCALE_SHIFT);
996 /* restore the queue back to VF ownership */
997 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
1002 * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
1003 * @hw: pointer to the HW structure
1004 * @vf_info: pointer to VF information structure
1006 * Reassign the interrupts and queues to a VF following an FLR
1008 STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
1009 struct fm10k_vf_info *vf_info)
1011 u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
1012 u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
1013 u16 vf_v_idx, vf_v_limit, vf_vid;
1014 u8 vf_idx = vf_info->vf_idx;
1017 /* verify vf is in range */
1018 if (vf_idx >= hw->iov.num_vfs)
1019 return FM10K_ERR_PARAM;
1021 /* clear event notification of VF FLR */
1022 FM10K_WRITE_REG(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
1024 /* force timeout and then disconnect the mailbox */
1025 vf_info->mbx.timeout = 0;
1026 if (vf_info->mbx.ops.disconnect)
1027 vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
1029 /* determine vector offset and count */
1030 vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
1031 vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
1033 /* determine qmap offsets and counts */
1034 qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
1035 queues_per_pool = fm10k_queues_per_pool(hw);
1036 qmap_idx = qmap_stride * vf_idx;
1038 /* make all the queues inaccessible to the VF */
1039 for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
1040 FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
1041 FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
1044 /* calculate starting index for queues */
1045 vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
1047 /* determine correct default VLAN ID */
1048 if (vf_info->pf_vid)
1049 vf_vid = vf_info->pf_vid;
1051 vf_vid = vf_info->sw_vid;
1053 /* configure Queue control register */
1054 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
1055 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
1056 FM10K_TXQCTL_VF | vf_idx;
1057 rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
1059 /* stop further DMA and reset queue ownership back to VF */
1060 for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
1061 FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
1062 FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
1063 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i),
1064 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
1065 FM10K_RXDCTL_DROP_ON_EMPTY);
1066 FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), rxqctl);
1069 /* reset TC with -1 credits and no quanta to prevent transmit */
1070 FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
1071 FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), 0);
1072 FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx),
1073 FM10K_TC_CREDIT_CREDIT_MASK);
1075 /* update our first entry in the table based on previous VF */
1077 hw->mac.ops.update_int_moderator(hw);
1079 hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
1081 /* reset linked list so it now includes our active vectors */
1082 if (vf_idx == (hw->iov.num_vfs - 1))
1083 FM10K_WRITE_REG(hw, FM10K_ITR2(0), vf_v_idx);
1085 FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
1087 /* link remaining vectors so that next points to previous */
1088 for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
1089 FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
1091 /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
1092 for (i = FM10K_VFMBMEM_LEN; i--;)
1093 FM10K_WRITE_REG(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
1094 for (i = FM10K_VLAN_TABLE_SIZE; i--;)
1095 FM10K_WRITE_REG(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
1096 for (i = FM10K_RETA_SIZE; i--;)
1097 FM10K_WRITE_REG(hw, FM10K_RETA(vf_info->vsi, i), 0);
1098 for (i = FM10K_RSSRK_SIZE; i--;)
1099 FM10K_WRITE_REG(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
1100 FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0);
1102 /* Update base address registers to contain MAC address */
1103 if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
1104 tdbal = (((u32)vf_info->mac[3]) << 24) |
1105 (((u32)vf_info->mac[4]) << 16) |
1106 (((u32)vf_info->mac[5]) << 8);
1107 tdbah = (((u32)0xFF) << 24) |
1108 (((u32)vf_info->mac[0]) << 16) |
1109 (((u32)vf_info->mac[1]) << 8) |
1110 ((u32)vf_info->mac[2]);
1113 /* map queue pairs back to VF from last to first */
1114 for (i = queues_per_pool; i--;) {
1115 FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
1116 FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
1117 /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
1118 * explanation of how TDLEN is used.
1120 FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx + i),
1121 hw->mac.itr_scale <<
1122 FM10K_TDLEN_ITR_SCALE_SHIFT);
1123 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
1124 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
1127 /* repeat the first ring for all the remaining VF rings */
1128 for (i = queues_per_pool; i < qmap_stride; i++) {
1129 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
1130 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
1133 return FM10K_SUCCESS;
1137 * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
1138 * @hw: pointer to hardware structure
1139 * @vf_info: pointer to VF information structure
1140 * @lport_idx: Logical port offset from the hardware glort
1141 * @flags: Set of capability flags to extend port beyond basic functionality
1143 * This function allows enabling a VF port by assigning it a GLORT and
1144 * setting the flags so that it can enable an Rx mode.
1146 STATIC s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
1147 struct fm10k_vf_info *vf_info,
1148 u16 lport_idx, u8 flags)
1150 u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
1152 DEBUGFUNC("fm10k_iov_set_lport_state_pf");
1154 /* if glort is not valid return error */
1155 if (!fm10k_glort_valid_pf(hw, glort))
1156 return FM10K_ERR_PARAM;
1158 vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
1159 vf_info->glort = glort;
1161 return FM10K_SUCCESS;
1165 * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
1166 * @hw: pointer to hardware structure
1167 * @vf_info: pointer to VF information structure
1169 * This function disables a VF port by stripping it of a GLORT and
1170 * setting the flags so that it cannot enable any Rx mode.
1172 STATIC void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
1173 struct fm10k_vf_info *vf_info)
1177 DEBUGFUNC("fm10k_iov_reset_lport_state_pf");
1179 /* need to disable the port if it is already enabled */
1180 if (FM10K_VF_FLAG_ENABLED(vf_info)) {
1181 /* notify switch that this port has been disabled */
1182 fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
1184 /* generate port state response to notify VF it is not ready */
1185 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1186 vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
1189 /* clear flags and glort if it exists */
1190 vf_info->vf_flags = 0;
1195 * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
1196 * @hw: pointer to hardware structure
1197 * @q: stats for all queues of a VF
1198 * @vf_idx: index of VF
1200 * This function collects queue stats for VFs.
1202 STATIC void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
1203 struct fm10k_hw_stats_q *q,
1208 /* get stats for all of the queues */
1209 qpp = fm10k_queues_per_pool(hw);
1210 idx = fm10k_vf_queue_index(hw, vf_idx);
1211 fm10k_update_hw_stats_q(hw, q, idx, qpp);
1215 * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
1216 * @hw: Pointer to hardware structure
1217 * @results: Pointer array to message, results[0] is pointer to message
1218 * @mbx: Pointer to mailbox information structure
1220 * This function is a default handler for MSI-X requests from the VF. The
1221 * assumption is that in this case it is acceptable to just directly
1222 * hand off the message from the VF to the underlying shared code.
1224 s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
1225 struct fm10k_mbx_info *mbx)
1227 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1228 u8 vf_idx = vf_info->vf_idx;
1230 UNREFERENCED_1PARAMETER(results);
1231 DEBUGFUNC("fm10k_iov_msg_msix_pf");
1233 return hw->iov.ops.assign_int_moderator(hw, vf_idx);
1237 * fm10k_iov_select_vid - Select correct default VLAN ID
1238 * @vf_info: Pointer to VF information structure
1239 * @vid: VLAN ID to correct
1241 * Will report an error if the VLAN ID is out of range. For VID = 0, it will
1242 * return either the pf_vid or sw_vid depending on which one is set.
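 *
 * Illustrative behavior: with pf_vid = 100, a request for VID 0 resolves to
 * 100, a request for VID 100 is returned unchanged, and any other VID is
 * rejected with FM10K_ERR_PARAM; with no pf_vid set, VID 0 resolves to
 * sw_vid and other VIDs are returned as-is.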
1244 STATIC s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
1247 return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
1248 else if (vf_info->pf_vid && vid != vf_info->pf_vid)
1249 return FM10K_ERR_PARAM;
1255 * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
1256 * @hw: Pointer to hardware structure
1257 * @results: Pointer array to message, results[0] is pointer to message
1258 * @mbx: Pointer to mailbox information structure
1260 * This function is a default handler for MAC/VLAN requests from the VF.
1261 * The assumption is that in this case it is acceptable to just directly
1262 * hand off the message from the VF to the underlying shared code.
1264 s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1265 struct fm10k_mbx_info *mbx)
1267 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1270 int err = FM10K_SUCCESS;
1275 DEBUGFUNC("fm10k_iov_msg_mac_vlan_pf");
1277 /* we shouldn't be updating rules on a disabled interface */
1278 if (!FM10K_VF_FLAG_ENABLED(vf_info))
1279 err = FM10K_ERR_PARAM;
1281 if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
1282 result = results[FM10K_MAC_VLAN_MSG_VLAN];
1284 /* record VLAN id requested */
1285 err = fm10k_tlv_attr_get_u32(result, &vid);
1289 set = !(vid & FM10K_VLAN_CLEAR);
1290 vid &= ~FM10K_VLAN_CLEAR;
1292 /* if the length field has been set, this is a multi-bit
1293 * update request. For multi-bit requests, simply disallow
1294 * them when the pf_vid has been set. In this case, the PF
1295 * should have already cleared the VLAN_TABLE, and if we
1296 * allowed them, it could allow a rogue VF to receive traffic
1297 * on a VLAN it was not assigned. In the single-bit case, we
1298 * need to modify requests for VLAN 0 to use the default PF or
1299 * SW vid when assigned.
1303 /* prevent multi-bit requests when PF has
1304 * administratively set the VLAN for this VF
1306 if (vf_info->pf_vid)
1307 return FM10K_ERR_PARAM;
1309 err = fm10k_iov_select_vid(vf_info, (u16)vid);
1316 /* update VSI info for VF in regards to VLAN table */
1317 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
1320 if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
1321 result = results[FM10K_MAC_VLAN_MSG_MAC];
1323 /* record unicast MAC address requested */
1324 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1328 /* block attempts to set MAC for a locked device */
1329 if (IS_VALID_ETHER_ADDR(vf_info->mac) &&
1330 memcmp(mac, vf_info->mac, ETH_ALEN))
1331 return FM10K_ERR_PARAM;
1333 set = !(vlan & FM10K_VLAN_CLEAR);
1334 vlan &= ~FM10K_VLAN_CLEAR;
1336 err = fm10k_iov_select_vid(vf_info, vlan);
1342 /* notify switch of request for new unicast address */
1343 err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
1347 if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
1348 result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
1350 /* record multicast MAC address requested */
1351 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1355 /* verify that the VF is allowed to request multicast */
1356 if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
1357 return FM10K_ERR_PARAM;
1359 set = !(vlan & FM10K_VLAN_CLEAR);
1360 vlan &= ~FM10K_VLAN_CLEAR;
1362 err = fm10k_iov_select_vid(vf_info, vlan);
1368 /* notify switch of request for new multicast address */
1369 err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
1377 * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
1378 * @vf_info: VF info structure containing capability flags
1379 * @mode: Requested xcast mode
1381 * This function outputs the mode that most closely matches the requested
1382 * mode. If no modes match, it will request that we disable the port.
1384 STATIC u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
1387 u8 vf_flags = vf_info->vf_flags;
1389 /* match up mode to capabilities as best as possible */
1391 case FM10K_XCAST_MODE_PROMISC:
1392 if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
1393 return FM10K_XCAST_MODE_PROMISC;
1395 case FM10K_XCAST_MODE_ALLMULTI:
1396 if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
1397 return FM10K_XCAST_MODE_ALLMULTI;
1399 case FM10K_XCAST_MODE_MULTI:
1400 if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
1401 return FM10K_XCAST_MODE_MULTI;
1403 case FM10K_XCAST_MODE_NONE:
1404 if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
1405 return FM10K_XCAST_MODE_NONE;
1411 /* disable interface as it should not be able to request any */
1412 return FM10K_XCAST_MODE_DISABLE;
1416 * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
1417 * @hw: Pointer to hardware structure
1418 * @results: Pointer array to message, results[0] is pointer to message
1419 * @mbx: Pointer to mailbox information structure
1421 * This function is a default handler for port state requests. The port
1422 * state requests for now are basic and consist of enabling or disabling
1425 s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
1426 struct fm10k_mbx_info *mbx)
1428 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1430 s32 err = FM10K_SUCCESS;
1434 DEBUGFUNC("fm10k_iov_msg_lport_state_pf");
1436 /* verify VF is allowed to enable even minimal mode */
1437 if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
1438 return FM10K_ERR_PARAM;
1440 if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
1441 result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
1443 /* XCAST mode update requested */
1444 err = fm10k_tlv_attr_get_u8(result, &mode);
1446 return FM10K_ERR_PARAM;
1448 /* prep for possible demotion depending on capabilities */
1449 mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
1451 /* if mode is not currently enabled, enable it */
1452 if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
1453 fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
1455 /* swap mode back to a bit flag */
1456 mode = FM10K_VF_FLAG_SET_MODE(mode);
1457 } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
1458 /* need to disable the port if it is already enabled */
1459 if (FM10K_VF_FLAG_ENABLED(vf_info))
1460 err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1463 /* we need to clear VF_FLAG_ENABLED flags in order to ensure
1464 * that we actually re-enable the LPORT state below. Note that
1465 * this has no impact if the VF is already disabled, as the
1466 * flags are already cleared.
1469 vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
1471 /* when enabling the port we should reset the rate limiters */
1472 hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
1474 /* set mode for minimal functionality */
1475 mode = FM10K_VF_FLAG_SET_MODE_NONE;
1477 /* generate port state response to notify VF it is ready */
1478 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1479 fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
1480 mbx->ops.enqueue_tx(hw, mbx, msg);
1483 /* if enable state toggled note the update */
1484 if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
1485 err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
1488 /* if state change succeeded, then update our stored state */
1489 mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
1491 vf_info->vf_flags = mode;
1496 #ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS
1497 const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
1498 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1499 FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
1500 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
1501 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
1502 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1507 * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
1508 * @hw: pointer to hardware structure
1509 * @stats: pointer to the stats structure to update
1511 * This function collects and aggregates global and per queue hardware statistics.
1514 STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
1515 struct fm10k_hw_stats *stats)
1517 u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
1520 DEBUGFUNC("fm10k_update_hw_stats_pf");
1522 /* Use Tx queue 0 as a canary to detect a reset */
1523 id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
1525 /* Read Global Statistics */
1527 timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
1529 ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
1530 ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
1531 um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
1532 xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
1533 vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
1536 fm10k_read_hw_stats_32b(hw,
1537 FM10K_STATS_LOOPBACK_DROP,
1538 &stats->loopback_drop);
1539 nodesc_drop = fm10k_read_hw_stats_32b(hw,
1540 FM10K_STATS_NODESC_DROP,
1541 &stats->nodesc_drop);
1543 /* if value has not changed then we have consistent data */
1545 id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
1546 } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
1548 /* drop non-ID bits and set VALID ID bit */
1549 id &= FM10K_TXQCTL_ID_MASK;
1550 id |= FM10K_STAT_VALID;
1552 /* Update Global Statistics */
1553 if (stats->stats_idx == id) {
1554 stats->timeout.count += timeout;
1555 stats->ur.count += ur;
1556 stats->ca.count += ca;
1557 stats->um.count += um;
1558 stats->xec.count += xec;
1559 stats->vlan_drop.count += vlan_drop;
1560 stats->loopback_drop.count += loopback_drop;
1561 stats->nodesc_drop.count += nodesc_drop;
1564 /* Update bases and record current PF id */
1565 fm10k_update_hw_base_32b(&stats->timeout, timeout);
1566 fm10k_update_hw_base_32b(&stats->ur, ur);
1567 fm10k_update_hw_base_32b(&stats->ca, ca);
1568 fm10k_update_hw_base_32b(&stats->um, um);
1569 fm10k_update_hw_base_32b(&stats->xec, xec);
1570 fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
1571 fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
1572 fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
1573 stats->stats_idx = id;
1575 /* Update Queue Statistics */
1576 fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
1580 * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
1581 * @hw: pointer to hardware structure
1582 * @stats: pointer to the stats structure to update
1584 * This function resets the base for global and per queue hardware statistics.
1587 STATIC void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
1588 struct fm10k_hw_stats *stats)
1590 DEBUGFUNC("fm10k_rebind_hw_stats_pf");
1592 /* Unbind Global Statistics */
1593 fm10k_unbind_hw_stats_32b(&stats->timeout);
1594 fm10k_unbind_hw_stats_32b(&stats->ur);
1595 fm10k_unbind_hw_stats_32b(&stats->ca);
1596 fm10k_unbind_hw_stats_32b(&stats->um);
1597 fm10k_unbind_hw_stats_32b(&stats->xec);
1598 fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
1599 fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
1600 fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
1602 /* Unbind Queue Statistics */
1603 fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
1605 /* Reinitialize bases for all stats */
1606 fm10k_update_hw_stats_pf(hw, stats);
1610 * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
1611 * @hw: pointer to hardware structure
1612 * @dma_mask: 64 bit DMA mask required for platform
1614 * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
1615 * to limit the access to memory beyond what is physically in the system.
1617 STATIC void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
1619 /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
1620 u32 phyaddr = (u32)(dma_mask >> 32);
1622 DEBUGFUNC("fm10k_set_dma_mask_pf");
1624 FM10K_WRITE_REG(hw, FM10K_PHYADDR, phyaddr);
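/* Worked example: a 48-bit DMA mask (0x0000FFFFFFFFFFFF) results in
 * phyaddr = 0x0000FFFF being written to FM10K_PHYADDR, capping device
 * accesses at the platform's 48-bit physical address space.
 */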
1628 * fm10k_get_fault_pf - Record a fault in one of the interface units
1629 * @hw: pointer to hardware structure
1630 * @type: pointer to fault type register offset
1631 * @fault: pointer to memory location to record the fault
1633 * Record the fault register contents to the fault data structure and
1634 * clear the entry from the register.
1636 * Returns ERR_PARAM if invalid register is specified or no error is present.
1638 STATIC s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
1639 struct fm10k_fault *fault)
1643 DEBUGFUNC("fm10k_get_fault_pf");
1645 /* verify the fault register is in range and is aligned */
1647 case FM10K_PCA_FAULT:
1648 case FM10K_THI_FAULT:
1649 case FM10K_FUM_FAULT:
1652 return FM10K_ERR_PARAM;
1655 /* only service faults that are valid */
1656 func = FM10K_READ_REG(hw, type + FM10K_FAULT_FUNC);
1657 if (!(func & FM10K_FAULT_FUNC_VALID))
1658 return FM10K_ERR_PARAM;
1660 /* read remaining fields */
1661 fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_HI);
1662 fault->address <<= 32;
1663 fault->address |= FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_LO);
1664 fault->specinfo = FM10K_READ_REG(hw, type + FM10K_FAULT_SPECINFO);
1666 /* clear valid bit to allow for next error */
1667 FM10K_WRITE_REG(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
1669 /* Record which function triggered the error */
1670 if (func & FM10K_FAULT_FUNC_PF)
1673 fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
1674 FM10K_FAULT_FUNC_VF_SHIFT);
1676 /* record fault type */
1677 fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
1679 return FM10K_SUCCESS;
1683 * fm10k_request_lport_map_pf - Request LPORT map from the switch API
1684 * @hw: pointer to hardware structure
1687 STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
1689 struct fm10k_mbx_info *mbx = &hw->mbx;
1692 DEBUGFUNC("fm10k_request_lport_pf");
1694 /* issue request asking for LPORT map */
1695 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
1697 /* load onto outgoing mailbox */
1698 return mbx->ops.enqueue_tx(hw, mbx, msg);
1702 * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
1703 * @hw: pointer to hardware structure
1704 * @switch_ready: pointer to boolean value that will record switch state
1706 * This function will check the DMA_CTRL2 register and mailbox in order
1707 * to determine if the switch is ready for the PF to begin requesting
1708 * addresses and mapping traffic to the local interface.
1710 STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
1714 DEBUGFUNC("fm10k_get_host_state_pf");
1716 /* verify the switch is ready for interaction */
1717 dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
1718 if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
1719 return FM10K_SUCCESS;
1721 /* retrieve generic host state info */
1722 return fm10k_get_host_state_generic(hw, switch_ready);
1725 /* This structure defines the attributes to be parsed below */
1726 const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
1727 FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1728 sizeof(struct fm10k_swapi_error)),
1729 FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
1734 * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
1735 * @hw: Pointer to hardware structure
1736 * @results: pointer array containing parsed data
1737 * @mbx: Pointer to mailbox information structure
1739 * This handler configures the lport mapping based on the reply from the switch API.
1742 s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
1743 struct fm10k_mbx_info *mbx)
1749 UNREFERENCED_1PARAMETER(mbx);
1750 DEBUGFUNC("fm10k_msg_lport_map_pf");
1752 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
1757 /* extract values out of the header */
1758 glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
1759 mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
1761 /* verify mask is set and none of the masked bits in glort are set */
1762 if (!mask || (glort & ~mask))
1763 return FM10K_ERR_PARAM;
1765 /* verify the mask is contiguous, and that it is 1's followed by 0's */
1766 if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
1767 return FM10K_ERR_PARAM;
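/* Illustrative check: for a contiguous mask such as 0xff00,
 * ~(mask - 1) & mask isolates the lowest set bit (0x0100), and adding the
 * mask gives 0x10000, which leaves no bits in the low 16-bit glort field,
 * so the test passes; a non-contiguous mask such as 0xff01 sums to 0xff02
 * and is rejected.
 */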
1769 /* record the glort, mask, and port count */
1770 hw->mac.dglort_map = dglort_map;
1772 return FM10K_SUCCESS;
1775 const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
1776 FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
1781 * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
1782 * @hw: Pointer to hardware structure
1783 * @results: pointer array containing parsed data
1784 * @mbx: Pointer to mailbox information structure
1786 * This handler configures the default VLAN for the PF
1788 static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
1789 struct fm10k_mbx_info *mbx)
1795 UNREFERENCED_1PARAMETER(mbx);
1796 DEBUGFUNC("fm10k_msg_update_pvid_pf");
1798 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1803 /* extract values from the pvid update */
1804 glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1805 pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1807 /* if glort is not valid return error */
1808 if (!fm10k_glort_valid_pf(hw, glort))
1809 return FM10K_ERR_PARAM;
1811 /* verify VLAN ID is valid */
1812 if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1813 return FM10K_ERR_PARAM;
1815 /* record the port VLAN ID value */
1816 hw->mac.default_vid = pvid;
1818 return FM10K_SUCCESS;

/**
 * fm10k_record_global_table_data - Move global table data to swapi table info
 * @from: pointer to source table data structure
 * @to: pointer to destination table info structure
 *
 * This function will copy table_data to the table_info contained in
 * the hw->swapi structure.
 **/
static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
					   struct fm10k_swapi_table_info *to)
{
	/* convert from le32 struct to CPU byte ordered values */
	to->used = FM10K_LE32_TO_CPU(from->used);
	to->avail = FM10K_LE32_TO_CPU(from->avail);
}

const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
				 sizeof(struct fm10k_swapi_error)),
	FM10K_TLV_ATTR_LAST
};

/**
 * fm10k_msg_err_pf - Message handler for error reply
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler will capture the data for any error replies to previous
 * messages that the PF has sent.
 **/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
		     struct fm10k_mbx_info *mbx)
{
	struct fm10k_swapi_error err_msg;
	s32 err;

	UNREFERENCED_1PARAMETER(mbx);
	DEBUGFUNC("fm10k_msg_err_pf");

	/* extract structure from message */
	err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
					   &err_msg, sizeof(err_msg));
	if (err)
		return err;

	/* record table status */
	fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
	fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
	fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);

	/* record SW API status value */
	hw->swapi.status = FM10K_LE32_TO_CPU(err_msg.status);

	return FM10K_SUCCESS;
}

/* currently there is no shared 1588 timestamp handler */

const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
				 sizeof(struct fm10k_swapi_1588_timestamp)),
	FM10K_TLV_ATTR_LAST
};

const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER,
				 sizeof(struct fm10k_swapi_1588_clock_owner)),
	FM10K_TLV_ATTR_LAST
};

const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[] = {
	FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET),
	FM10K_TLV_ATTR_LAST
};

/**
 * fm10k_iov_notify_offset_pf - Notify VF of change in PTP offset
 * @hw: pointer to hardware structure
 * @vf_info: pointer to the vf info structure
 * @offset: 64-bit unsigned offset from hardware SYSTIME
 *
 * This function sends a message to a given VF to notify it of PTP offset
 * changes.
 **/
STATIC void fm10k_iov_notify_offset_pf(struct fm10k_hw *hw,
				       struct fm10k_vf_info *vf_info,
				       u64 offset)
{
	u32 msg[4];

	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
	fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_CLK_OFFSET, offset);

	if (vf_info->mbx.ops.enqueue_tx)
		vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
}

/**
 * fm10k_msg_1588_clock_owner_pf - Message handler for clock ownership from SM
 * @hw: pointer to hardware structure
 * @results: pointer to array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler configures the FM10K_HW_FLAG_CLOCK_OWNER field for the PF
 **/
s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *hw, u32 **results,
				  struct fm10k_mbx_info *mbx)
{
	struct fm10k_swapi_1588_clock_owner msg;
	u16 glort;
	s32 err;

	UNREFERENCED_1PARAMETER(mbx);
	DEBUGFUNC("fm10k_msg_1588_clock_owner");

	err = fm10k_tlv_attr_get_le_struct(
				results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER],
				&msg, sizeof(msg));
	if (err)
		return err;

	/* We own the clock iff the glort matches us and the enabled field is
	 * true. Otherwise, the clock must belong to some other port.
	 */
	glort = le16_to_cpu(msg.glort);
	if (fm10k_glort_valid_pf(hw, glort) && msg.enabled)
		hw->flags |= FM10K_HW_FLAG_CLOCK_OWNER;
	else
		hw->flags &= ~FM10K_HW_FLAG_CLOCK_OWNER;

	return FM10K_SUCCESS;
}

/**
 * fm10k_adjust_systime_pf - Adjust systime frequency
 * @hw: pointer to hardware structure
 * @ppb: adjustment rate in parts per billion
 *
 * This function will adjust the SYSTIME_CFG register contained in BAR 4
 * if this function is supported for BAR 4 access. The adjustment amount
 * is based on the parts per billion value provided and adjusted to a
 * value based on parts per 2^48 clock cycles.
 *
 * If adjustment is not supported or the requested value is too large
 * we will return an error.
 **/
STATIC s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
{
	u64 systime_adjust;

	DEBUGFUNC("fm10k_adjust_systime_pf");

	/* ensure that we control the clock */
	if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
		return FM10K_ERR_DEVICE_NOT_SUPPORTED;

	/* if sw_addr is not set we don't have switch register access */
	if (!hw->sw_addr)
		return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;

	/* We must convert the value from parts per billion to parts per
	 * 2^48 cycles. In addition, only the 30 most significant bits of
	 * the adjustment value are used; the 8 least significant bits live
	 * in another register and represent a value significantly smaller
	 * than one part per billion. Dropping those 8 bits means the value
	 * we write is effectively multiplied by 2^8.
	 *
	 * As a result the math breaks down as follows:
	 *	ppb / 10^9 == adjust * 2^8 / 2^48
	 * Solving this for adjust and simplifying gives:
	 *	ppb * 2^31 / 5^9 == adjust
	 */
	systime_adjust = (ppb < 0) ? -ppb : ppb;
	systime_adjust <<= 31;
	do_div(systime_adjust, 1953125);

	/* verify the requested adjustment value is in range */
	if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
		return FM10K_ERR_PARAM;

	if (ppb > 0)
		systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;

	FM10K_WRITE_SW_REG(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);

	return FM10K_SUCCESS;
}

/**
 * fm10k_notify_offset_pf - Notify switch of change in PTP offset
 * @hw: pointer to hardware structure
 * @offset: 64-bit unsigned offset of SYSTIME
 *
 * This function sends a message to the switch to indicate a change in the
 * offset of the hardware SYSTIME registers. The switch manager is
 * responsible for transmitting this message to other hosts.
 **/
STATIC s32 fm10k_notify_offset_pf(struct fm10k_hw *hw, u64 offset)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[4];

	DEBUGFUNC("fm10k_notify_offset_pf");

	/* ensure that we control the clock */
	if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
		return FM10K_ERR_DEVICE_NOT_SUPPORTED;

	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET);
	fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_read_systime_pf - Reads value of systime registers
 * @hw: pointer to the hardware structure
 *
 * Function reads the content of 2 registers, combined to represent a 64-bit
 * value measured in nanoseconds. In order to guarantee the value is accurate
 * we check the 32 most significant bits both before and after reading the
 * 32 least significant bits to verify they didn't change as we were reading
 * the registers.
 **/
static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
{
	u32 systime_l, systime_h, systime_tmp;

	systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);

	do {
		systime_tmp = systime_h;
		systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
		systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
	} while (systime_tmp != systime_h);

	return ((u64)systime_h << 32) | systime_l;
}
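
/* Illustrative failure case motivating the re-read loop above: if SYSTIME
 * rolls over between the two reads (say the high word is first read as 5,
 * then the counter wraps from 5/0xffffffff to 6/0x00000000 before the low
 * word is read), combining the stale high word with the new low word would
 * report a time roughly 2^32 ns (about 4.3 seconds) in the past. The
 * do/while above simply discards any sample where the high word changed.
 */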

static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/**
 * fm10k_init_ops_pf - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for PF.
 * Does not touch the hardware.
 **/
s32 fm10k_init_ops_pf(struct fm10k_hw *hw)
{
	struct fm10k_mac_info *mac = &hw->mac;
	struct fm10k_iov_info *iov = &hw->iov;

	DEBUGFUNC("fm10k_init_ops_pf");

	fm10k_init_ops_generic(hw);

	mac->ops.reset_hw = &fm10k_reset_hw_pf;
	mac->ops.init_hw = &fm10k_init_hw_pf;
	mac->ops.start_hw = &fm10k_start_hw_generic;
	mac->ops.stop_hw = &fm10k_stop_hw_generic;
#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
	mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_pf;
#endif
	mac->ops.update_vlan = &fm10k_update_vlan_pf;
	mac->ops.read_mac_addr = &fm10k_read_mac_addr_pf;
	mac->ops.update_uc_addr = &fm10k_update_uc_addr_pf;
	mac->ops.update_mc_addr = &fm10k_update_mc_addr_pf;
	mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_pf;
	mac->ops.update_int_moderator = &fm10k_update_int_moderator_pf;
	mac->ops.update_lport_state = &fm10k_update_lport_state_pf;
	mac->ops.update_hw_stats = &fm10k_update_hw_stats_pf;
	mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_pf;
	mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_pf;
	mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf;
	mac->ops.get_fault = &fm10k_get_fault_pf;
	mac->ops.get_host_state = &fm10k_get_host_state_pf;
	mac->ops.request_lport_map = &fm10k_request_lport_map_pf;
	mac->ops.adjust_systime = &fm10k_adjust_systime_pf;
	mac->ops.notify_offset = &fm10k_notify_offset_pf;
	mac->ops.read_systime = &fm10k_read_systime_pf;

	mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);

	iov->ops.assign_resources = &fm10k_iov_assign_resources_pf;
	iov->ops.configure_tc = &fm10k_iov_configure_tc_pf;
	iov->ops.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf;
	iov->ops.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf;
	iov->ops.reset_resources = &fm10k_iov_reset_resources_pf;
	iov->ops.set_lport = &fm10k_iov_set_lport_pf;
	iov->ops.reset_lport = &fm10k_iov_reset_lport_pf;
	iov->ops.update_stats = &fm10k_iov_update_stats_pf;
	iov->ops.notify_offset = &fm10k_iov_notify_offset_pf;

	return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
}
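
/* Minimal usage sketch, for illustration only and not part of the driver:
 * it assumes the caller has already mapped the device register space the
 * way the rest of this file expects, and the FM10K_PF_INIT_EXAMPLE guard
 * is a hypothetical macro so the sketch stays compiled out by default.
 */
#ifdef FM10K_PF_INIT_EXAMPLE
static s32 fm10k_example_pf_bring_up(struct fm10k_hw *hw)
{
	s32 err;

	/* install the PF function pointers and set up the SM mailbox */
	err = fm10k_init_ops_pf(hw);
	if (err)
		return err;

	/* reset the device, then initialize and start the MAC */
	err = hw->mac.ops.reset_hw(hw);
	if (err)
		return err;

	err = hw->mac.ops.init_hw(hw);
	if (err)
		return err;

	return hw->mac.ops.start_hw(hw);
}
#endif /* FM10K_PF_INIT_EXAMPLE */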