/*******************************************************************************

  Copyright (c) 2001-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
35 #include "ixgbe_type.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #ident "$Id: ixgbe_dcb_82599.c,v 1.67 2012/03/30 06:45:33 jtkirshe Exp $"
41 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
42 * @hw: pointer to hardware structure
43 * @stats: pointer to statistics structure
44 * @tc_count: Number of elements in bwg_array.
46 * This function returns the status data for each of the Traffic Classes in use.
48 s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
49 struct ixgbe_hw_stats *stats,
54 DEBUGFUNC("dcb_get_tc_stats");
56 if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
57 return IXGBE_ERR_PARAM;
59 /* Statistics pertaining to each traffic class */
60 for (tc = 0; tc < tc_count; tc++) {
61 /* Transmitted Packets */
62 stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
63 /* Transmitted Bytes (read low first to prevent missed carry) */
64 stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
66 (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
67 /* Received Packets */
68 stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
69 /* Received Bytes (read low first to prevent missed carry) */
70 stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
72 (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
74 /* Received Dropped Packet */
75 stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
82 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
83 * @hw: pointer to hardware structure
84 * @stats: pointer to statistics structure
85 * @tc_count: Number of elements in bwg_array.
87 * This function returns the CBFC status data for each of the Traffic Classes.
89 s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
90 struct ixgbe_hw_stats *stats,
95 DEBUGFUNC("dcb_get_pfc_stats");
97 if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
98 return IXGBE_ERR_PARAM;
100 for (tc = 0; tc < tc_count; tc++) {
101 /* Priority XOFF Transmitted */
102 stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
103 /* Priority XOFF Received */
104 stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
107 return IXGBE_SUCCESS;
111 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
112 * @hw: pointer to hardware structure
113 * @dcb_config: pointer to ixgbe_dcb_config structure
115 * Configure Rx Packet Arbiter and credits for each traffic class.
117 s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
118 u16 *max, u8 *bwg_id, u8 *tsa,
122 u32 credit_refill = 0;
127 * Disable the arbiter before changing parameters
128 * (always enable recycle mode; WSP)
130 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
131 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
134 * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
135 * bits sets for the UPs that needs to be mappped to that TC.
136 * e.g if priorities 6 and 7 are to be mapped to a TC then the
137 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
140 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
141 reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
143 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
145 /* Configure traffic class credits and priority */
146 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
147 credit_refill = refill[i];
149 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
151 reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
153 if (tsa[i] == ixgbe_dcb_tsa_strict)
154 reg |= IXGBE_RTRPT4C_LSP;
156 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
160 * Configure Rx packet plane (recycle mode; WSP) and
163 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
164 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
166 return IXGBE_SUCCESS;
170 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
171 * @hw: pointer to hardware structure
172 * @dcb_config: pointer to ixgbe_dcb_config structure
174 * Configure Tx Descriptor Arbiter and credits for each traffic class.
176 s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
177 u16 *max, u8 *bwg_id, u8 *tsa)
179 u32 reg, max_credits;
182 /* Clear the per-Tx queue credits; we use per-TC instead */
183 for (i = 0; i < 128; i++) {
184 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
185 IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
188 /* Configure traffic class credits and priority */
189 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
190 max_credits = max[i];
191 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
193 reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
195 if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
196 reg |= IXGBE_RTTDT2C_GSP;
198 if (tsa[i] == ixgbe_dcb_tsa_strict)
199 reg |= IXGBE_RTTDT2C_LSP;
201 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
205 * Configure Tx descriptor plane (recycle mode; WSP) and
208 reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
209 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
211 return IXGBE_SUCCESS;
215 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
216 * @hw: pointer to hardware structure
217 * @dcb_config: pointer to ixgbe_dcb_config structure
219 * Configure Tx Packet Arbiter and credits for each traffic class.
221 s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
222 u16 *max, u8 *bwg_id, u8 *tsa,
229 * Disable the arbiter before changing parameters
230 * (always enable recycle mode; SP; arb delay)
232 reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
233 (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
235 IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
238 * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
239 * bits sets for the UPs that needs to be mappped to that TC.
240 * e.g if priorities 6 and 7 are to be mapped to a TC then the
241 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
244 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
245 reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
247 IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
249 /* Configure traffic class credits and priority */
250 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
252 reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
253 reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
255 if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
256 reg |= IXGBE_RTTPT2C_GSP;
258 if (tsa[i] == ixgbe_dcb_tsa_strict)
259 reg |= IXGBE_RTTPT2C_LSP;
261 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
265 * Configure Tx packet plane (recycle mode; SP; arb delay) and
268 reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
269 (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
270 IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
272 return IXGBE_SUCCESS;
276 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
277 * @hw: pointer to hardware structure
278 * @pfc_en: enabled pfc bitmask
279 * @map: priority to tc assignments indexed by priority
281 * Configure Priority Flow Control (PFC) for each traffic class.
283 s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
285 u32 i, j, fcrtl, reg;
288 /* Enable Transmit Priority Flow Control */
289 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
291 /* Enable Receive Priority Flow Control */
292 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
293 reg |= IXGBE_MFLCN_DPF;
296 * X540 supports per TC Rx priority flow control. So
297 * clear all TCs and only enable those that should be
300 reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
302 if (hw->mac.type == ixgbe_mac_X540)
303 reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
306 reg |= IXGBE_MFLCN_RPFCE;
308 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
310 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
316 /* Configure PFC Tx thresholds per TC */
317 for (i = 0; i <= max_tc; i++) {
320 for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
321 if ((map[j] == i) && (pfc_en & (1 << j))) {
328 reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
329 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
330 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
333 * In order to prevent Tx hangs when the internal Tx
334 * switch is enabled we must set the high water mark
335 * to the Rx packet buffer size - 24KB. This allows
336 * the Tx switch to function even under heavy Rx
339 reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
340 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
343 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
346 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
347 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
348 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
351 /* Configure pause time (2 TCs per register) */
352 reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
353 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
354 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
356 /* Configure flow control refresh threshold value */
357 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
359 return IXGBE_SUCCESS;
363 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
364 * @hw: pointer to hardware structure
366 * Configure queue statistics registers, all queues belonging to same traffic
367 * class uses a single set of queue statistics counters.
369 s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
370 struct ixgbe_dcb_config *dcb_config)
375 bool vt_mode = false;
377 if (dcb_config != NULL) {
378 tc_count = dcb_config->num_tcs.pg_tcs;
379 vt_mode = dcb_config->vt_mode;
382 if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
383 return IXGBE_ERR_PARAM;
385 if (tc_count == 8 && vt_mode == false) {
387 * Receive Queues stats setting
388 * 32 RQSMR registers, each configuring 4 queues.
390 * Set all 16 queues of each TC to the same stat
391 * with TC 'n' going to stat 'n'.
393 for (i = 0; i < 32; i++) {
394 reg = 0x01010101 * (i / 4);
395 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
398 * Transmit Queues stats setting
399 * 32 TQSM registers, each controlling 4 queues.
401 * Set all queues of each TC to the same stat
402 * with TC 'n' going to stat 'n'.
403 * Tx queues are allocated non-uniformly to TCs:
404 * 32, 32, 16, 16, 8, 8, 8, 8.
406 for (i = 0; i < 32; i++) {
423 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
425 } else if (tc_count == 4 && vt_mode == false) {
427 * Receive Queues stats setting
428 * 32 RQSMR registers, each configuring 4 queues.
430 * Set all 16 queues of each TC to the same stat
431 * with TC 'n' going to stat 'n'.
433 for (i = 0; i < 32; i++) {
435 /* In 4 TC mode, odd 16-queue ranges are
439 reg = 0x01010101 * (i / 8);
440 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
443 * Transmit Queues stats setting
444 * 32 TQSM registers, each controlling 4 queues.
446 * Set all queues of each TC to the same stat
447 * with TC 'n' going to stat 'n'.
448 * Tx queues are allocated non-uniformly to TCs:
451 for (i = 0; i < 32; i++) {
460 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
462 } else if (tc_count == 4 && vt_mode == true) {
464 * Receive Queues stats setting
465 * 32 RQSMR registers, each configuring 4 queues.
467 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
468 * pool. Set all 32 queues of each TC across pools to the same
469 * stat with TC 'n' going to stat 'n'.
471 for (i = 0; i < 32; i++)
472 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
474 * Transmit Queues stats setting
475 * 32 TQSM registers, each controlling 4 queues.
477 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
478 * pool. Set all 32 queues of each TC across pools to the same
479 * stat with TC 'n' going to stat 'n'.
481 for (i = 0; i < 32; i++)
482 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
485 return IXGBE_SUCCESS;
489 * ixgbe_dcb_config_82599 - Configure general DCB parameters
490 * @hw: pointer to hardware structure
491 * @dcb_config: pointer to ixgbe_dcb_config structure
493 * Configure general DCB parameters.
495 s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
496 struct ixgbe_dcb_config *dcb_config)
501 /* Disable the Tx desc arbiter so that MTQC can be changed */
502 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
503 reg |= IXGBE_RTTDCS_ARBDIS;
504 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
506 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
507 if (dcb_config->num_tcs.pg_tcs == 8) {
508 /* Enable DCB for Rx with 8 TCs */
509 switch (reg & IXGBE_MRQC_MRQE_MASK) {
511 case IXGBE_MRQC_RT4TCEN:
512 /* RSS disabled cases */
513 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
516 case IXGBE_MRQC_RSSEN:
517 case IXGBE_MRQC_RTRSS4TCEN:
518 /* RSS enabled cases */
519 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
520 IXGBE_MRQC_RTRSS8TCEN;
524 * Unsupported value, assume stale data,
528 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
532 if (dcb_config->num_tcs.pg_tcs == 4) {
533 /* We support both VT-on and VT-off with 4 TCs. */
534 if (dcb_config->vt_mode)
535 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
536 IXGBE_MRQC_VMDQRT4TCEN;
538 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
539 IXGBE_MRQC_RTRSS4TCEN;
541 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
543 /* Enable DCB for Tx with 8 TCs */
544 if (dcb_config->num_tcs.pg_tcs == 8)
545 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
547 /* We support both VT-on and VT-off with 4 TCs. */
548 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
549 if (dcb_config->vt_mode)
550 reg |= IXGBE_MTQC_VT_ENA;
552 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
554 /* Disable drop for all queues */
555 for (q = 0; q < 128; q++)
556 IXGBE_WRITE_REG(hw, IXGBE_QDE,
557 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
559 /* Enable the Tx desc arbiter */
560 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
561 reg &= ~IXGBE_RTTDCS_ARBDIS;
562 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
564 /* Enable Security TX Buffer IFG for DCB */
565 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
566 reg |= IXGBE_SECTX_DCB;
567 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
569 return IXGBE_SUCCESS;
573 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
574 * @hw: pointer to hardware structure
575 * @dcb_config: pointer to ixgbe_dcb_config structure
577 * Configure dcb settings and enable dcb mode.
579 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
580 u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
583 UNREFERENCED_1PARAMETER(link_speed);
585 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
587 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
589 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
592 return IXGBE_SUCCESS;