ETH_TXQ_FLAGS_NOOFFLOADS,
};
dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
}
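
Since reta_size is now reported per MAC type, applications should take the table size from rte_eth_dev_info_get() rather than assume 128 entries. A minimal sketch of such a query against the public ethdev API; dump_reta() is a hypothetical helper, the port is assumed already configured, and error handling is trimmed:

    /* Sketch only: dump_reta() is a hypothetical helper; assumes an
     * already-configured port and trims error handling. */
    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>

    static void
    dump_reta(uint8_t port_id)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
                                                  RTE_RETA_GROUP_SIZE];
        uint16_t i;

        rte_eth_dev_info_get(port_id, &dev_info);

        /* Request every entry of each 64-entry group. */
        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size / RTE_RETA_GROUP_SIZE; i++)
            reta_conf[i].mask = ~0ULL;

        if (rte_eth_dev_rss_reta_query(port_id, reta_conf,
                                       dev_info.reta_size) != 0)
            return;

        for (i = 0; i < dev_info.reta_size; i++)
            printf("entry %u -> queue %u\n", i,
                   (unsigned)reta_conf[i / RTE_RETA_GROUP_SIZE]
                             .reta[i % RTE_RETA_GROUP_SIZE]);
    }

On an X550 PF this walks 512 entries; on an 82599 it walks 128, with no change to the application code.
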
uint32_t reta, r;
uint16_t idx, shift;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)\n", reta_size, sp_reta_size);
return -EINVAL;
}
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IXGBE_4_BIT_MASK);
if (!mask)
continue;
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
if (mask == IXGBE_4_BIT_MASK)
r = 0;
else
- r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ r = IXGBE_READ_REG(hw, reta_reg);
for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
if (mask & (0x1 << j))
reta |= reta_conf[idx].reta[shift + j] <<
(CHAR_BIT * j);
else
reta |= r & (IXGBE_8_BIT_MASK <<
(CHAR_BIT * j));
}
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ IXGBE_WRITE_REG(hw, reta_reg, reta);
}
return 0;
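
Each 32-bit RETA register packs four 8-bit queue indices, so entry i lands in the register picked by ixgbe_reta_reg_get() at byte lane i & 3, and lanes whose mask bit is clear are refilled from the read-back value r. A standalone rehearsal of that read-modify-write packing; the macro names and sample values here are made up for illustration:

    /* Standalone rehearsal of the read-modify-write packing above; the
     * macro names and sample values are illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define FOUR_BIT_WIDTH 4
    #define EIGHT_BIT_MASK 0xFFu

    int
    main(void)
    {
        uint32_t r = 0x03020100;           /* entries 0..3 -> queues 0..3 */
        uint8_t new_entry[4] = { 9, 0, 9, 0 };
        uint8_t mask = 0x5;                /* rewrite byte lanes 0 and 2 */
        uint32_t reta = 0;
        int j;

        for (j = 0; j < FOUR_BIT_WIDTH; j++) {
            if (mask & (0x1 << j))
                reta |= (uint32_t)new_entry[j] << (8 * j);
            else
                reta |= r & (EIGHT_BIT_MASK << (8 * j));
        }
        printf("0x%08x\n", reta);
        return 0;
    }

With mask = 0x5 this prints 0x03090109: lanes 0 and 2 take the new queue 9, lanes 1 and 3 keep their old values from r.
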
uint32_t reta;
uint16_t idx, shift;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+ if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)\n", reta_size, sp_reta_size);
return -EINVAL;
}
- for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+ for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
IXGBE_4_BIT_MASK);
if (!mask)
continue;
- reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+ reta = IXGBE_READ_REG(hw, reta_reg);
for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
if (mask & (0x1 << j))
reta_conf[idx].reta[shift + j] =
((reta >> (CHAR_BIT * j)) &
IXGBE_8_BIT_MASK);
return eeprom->ops.write_buffer(hw, first, length, data);
}
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ return ETH_RSS_RETA_SIZE_512;
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return ETH_RSS_RETA_SIZE_64;
+ default:
+ return ETH_RSS_RETA_SIZE_128;
+ }
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx)
+{
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ if (reta_idx < ETH_RSS_RETA_SIZE_128)
+ return IXGBE_RETA(reta_idx >> 2);
+ else
+ return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ return IXGBE_VFRETA(reta_idx >> 2);
+ default:
+ return IXGBE_RETA(reta_idx >> 2);
+ }
+}
+
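
On the X550 PF the first 128 entries stay in the legacy RETA registers and the remaining 384 spill into ERETA, while the VF indexes VFRETA; the helpers above centralize that choice. A standalone check of the entry-to-register arithmetic; RETA_ENTRIES_LEGACY and the printed slot names are illustrative, not the device's real MMIO layout:

    /* Standalone check of the entry -> register-slot arithmetic;
     * RETA_ENTRIES_LEGACY and the printed names are illustrative,
     * not the device's real MMIO offsets. */
    #include <stdint.h>
    #include <stdio.h>

    #define RETA_ENTRIES_LEGACY 128

    static void
    slot_for_entry(uint16_t i)
    {
        if (i < RETA_ENTRIES_LEGACY)
            printf("entry %3u -> RETA[%2u], byte %u\n",
                   i, i >> 2, i & 3);
        else
            printf("entry %3u -> ERETA[%2u], byte %u\n",
                   i, (i - RETA_ENTRIES_LEGACY) >> 2, i & 3);
    }

    int
    main(void)
    {
        slot_for_entry(0);      /* RETA[0],   byte 0 */
        slot_for_entry(127);    /* RETA[31],  byte 3 */
        slot_for_entry(128);    /* ERETA[0],  byte 0 */
        slot_for_entry(511);    /* ERETA[95], byte 3 */
        return 0;
    }
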
static struct rte_driver rte_ixgbe_driver = {
.type = PMD_PDEV,
.init = rte_ixgbe_pmd_init,
uint32_t reta;
uint16_t i;
uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
/*
* Fill in redirection table
* The byte-swap is needed because NIC registers are in
* little-endian order.
*/
reta = 0;
- for (i = 0, j = 0; i < 128; i++, j++) {
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
if (j == dev->data->nb_rx_queues)
j = 0;
reta = (reta << 8) | j;
if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+ IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
}
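
The default fill assigns RX queues to table entries round-robin, accumulating four entries per register and byte-swapping on write because the registers are little-endian. A standalone rehearsal for a hypothetical port with 3 RX queues; __builtin_bswap32 stands in for rte_bswap32:

    /* Standalone rehearsal of the fill loop above for a hypothetical
     * port with 3 RX queues; __builtin_bswap32 stands in for
     * rte_bswap32. */
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t reta = 0;
        uint16_t i, j, nb_rx_queues = 3, reta_size = 128;

        for (i = 0, j = 0; i < reta_size; i++, j++) {
            if (j == nb_rx_queues)
                j = 0;
            reta = (reta << 8) | j;
            if ((i & 3) == 3)   /* four entries accumulated */
                printf("reg %2u = 0x%08x\n", i >> 2,
                       __builtin_bswap32(reta));
        }
        return 0;
    }

The first register prints 0x00020100: entries 0-3 map to queues 0, 1, 2, 0, with each entry in the byte lane matching i & 3 after the swap.
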