X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fbase%2Fixgbe_82599.c;h=d9d11a8ec1cec44c61c2aa0a015ab55ee6819a4f;hb=ed0cc677ad22f1903329a227c72304db32ef5ef4;hp=f0deb596087f040acce970bc32676509b084ea13;hpb=f3430431abaf8708fb6c98236b1648672608a8a7;p=dpdk.git

diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index f0deb59608..d9d11a8ec1 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -378,8 +378,8 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
 	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
 	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
 
-	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
-				    IXGBE_FWSM_MODE_MASK) ? true : false;
+	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
+				      & IXGBE_FWSM_MODE_MASK);
 
 	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
 
@@ -1176,12 +1176,16 @@ mac_reset_top:
 
 	/* Add the SAN MAC address to the RAR only if it's a valid address */
 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
-		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
 		/* Save the SAN MAC RAR index */
 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
 
+		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+		/* clear VMDq pool/queue selection for this RAR */
+		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+				       IXGBE_CLEAR_VMDQ_ALL);
+
 		/* Reserve the last RAR for the SAN MAC address */
 		hw->mac.num_rar_entries--;
 	}
@@ -1369,7 +1373,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
 	 * Continue setup of fdirctrl register bits:
 	 *  Turn perfect match filtering on
 	 *  Report hash in RSS field of Rx wb descriptor
-	 *  Initialize the drop queue
+	 *  Initialize the drop queue to queue 127
 	 *  Move the flexible bytes to use the ethertype - shift 6 words
 	 *  Set the maximum length per hash bucket to 0xA filters
 	 *  Send interrupt when 64 (0x4 * 16) filters are left
@@ -1391,6 +1395,40 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
 	return IXGBE_SUCCESS;
 }
 
+/**
+ * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
+ * @hw: pointer to hardware structure
+ * @dropqueue: Rx queue index used for the dropped packets
+ **/
+void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
+{
+	u32 fdirctrl;
+
+	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
+	/* Clear init done bit and drop queue field */
+	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
+
+	/* Set drop queue */
+	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+	if ((hw->mac.type == ixgbe_mac_X550) ||
+	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
+	    (hw->mac.type == ixgbe_mac_X550EM_a))
+		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+			 IXGBE_FDIRCMD_CLEARHT));
+	IXGBE_WRITE_FLUSH(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+			 ~IXGBE_FDIRCMD_CLEARHT));
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* write hashes and fdirctrl register, poll for completion */
+	ixgbe_fdir_enable_82599(hw, fdirctrl);
+}
+
 /*
  * These defines allow us to quickly generate all of the necessary instructions
  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
@@ -1491,16 +1529,15 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
  * Note that the tunnel bit in input must not be set when the hardware
  * tunneling support does not exist.
  **/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-					   union ixgbe_atr_hash_dword input,
-					   union ixgbe_atr_hash_dword common,
-					   u8 queue)
+void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+					    union ixgbe_atr_hash_dword input,
+					    union ixgbe_atr_hash_dword common,
+					    u8 queue)
 {
 	u64 fdirhashcmd;
 	u8 flow_type;
 	bool tunnel;
 	u32 fdircmd;
-	s32 err;
 
 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
 
@@ -1522,7 +1559,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 		break;
 	default:
 		DEBUGOUT(" Error on flow type input\n");
-		return IXGBE_ERR_CONFIG;
+		return;
 	}
 
 	/* configure FDIRCMD register */
@@ -1541,15 +1578,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
 
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err) {
-		DEBUGOUT("Flow Director command did not complete!\n");
-		return err;
-	}
-
 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
 
-	return IXGBE_SUCCESS;
+	return;
 }
 
 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
@@ -1779,14 +1810,23 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 		}
 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
 
-		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
-		 * FDIRDIP4M in cloud mode to allow L3/L3 packets to
-		 * tunnel.
+		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
+		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
+		 * L3/L3 packets to tunnel.
 		 */
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+		switch (hw->mac.type) {
+		case ixgbe_mac_X550:
+		case ixgbe_mac_X550EM_x:
+		case ixgbe_mac_X550EM_a:
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+			break;
+		default:
+			break;
+		}
 	}
 
 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
@@ -1804,6 +1844,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 	switch (hw->mac.type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_X550EM_a:
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
 		break;
 	default:
@@ -2128,9 +2169,9 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
  *
  * Determines physical layer capabilities of the current configuration.
  **/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
 {
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
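
For context, a minimal usage sketch of the new drop-queue helper follows; it is not part of the commit above. It assumes the prototype is reachable through ixgbe_api.h, that Flow Director has already been brought up in perfect-filter mode, and that queue 127 is the queue set aside for dropped packets, as the updated comment in ixgbe_init_fdir_perfect_82599() suggests; the wrapper function name is hypothetical.

/* Illustrative sketch only -- not taken from this patch. */
#include "ixgbe_api.h"	/* assumed location of the prototype */

static void example_redirect_fdir_drops(struct ixgbe_hw *hw)
{
	/*
	 * Route packets that hit a drop filter to Rx queue 127.  The helper
	 * clears the INIT_DONE and drop-queue bits of FDIRCTRL, writes the
	 * new queue index (adding IXGBE_FDIRCTRL_DROP_NO_MATCH on X550-family
	 * MACs), toggles IXGBE_FDIRCMD_CLEARHT and then re-runs
	 * ixgbe_fdir_enable_82599(), so it belongs in a (re)configuration
	 * path rather than in the packet fast path.
	 */
	ixgbe_set_fdir_drop_queue_82599(hw, 127);
}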