+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ /* Flush added to prevent command failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
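+ /* Size of the RSS parameters that follow, in 8-byte words */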
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
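+ /* 0xF marks all RSS fields unchanged; the table bit is cleared below */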
+ rss_param->param.flags = 0xF;
+ rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
+ rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
+
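+ /* Apply only the entries selected by each 64-entry group's mask */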
+ for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
+ index = (i * RTE_RETA_GROUP_SIZE) + j;
+ rss_state->itable[index] = reta_conf[i].reta[j];
+ }
+ }
+ }
+
+ rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
+ memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
+
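+ /* Byte-swap the parameters in 8-byte words for the firmware */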
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ int i, j, num;
+
+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ num = reta_size / RTE_RETA_GROUP_SIZE;
+
+ /* Copy entry by entry: itable entries are 8-bit, reta entries 16-bit */
+ for (i = 0; i < num; i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if (reta_conf[i].mask & ((uint64_t)1 << j))
+ reta_conf[i].reta[j] =
+ rss_state->itable[(i * RTE_RETA_GROUP_SIZE) + j];
+ }
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ uint8_t *hash_key = NULL;
+ uint64_t rss_hf = 0;
+
+ if (rss_state->hash_disable) {
+ lio_dev_info(lio_dev, "RSS disabled in nic\n");
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+
+ /* Get key value */
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL)
+ memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
+
+ if (rss_state->ip)
+ rss_hf |= ETH_RSS_IPV4;
+ if (rss_state->tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (rss_state->ipv6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (rss_state->ipv6_tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (rss_state->ipv6_ex)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (rss_state->ipv6_tcp_ex_hash)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+
+ rss_conf->rss_hf = rss_hf;
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct lio_rss_set *rss_param;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ /* Flush added to prevent command failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
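+ /* Start with all RSS fields marked unchanged */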
+ rss_param->param.flags = 0xF;
+
+ if (rss_conf->rss_key) {
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
+ rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
+ rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
+ memcpy(rss_state->hash_key, rss_conf->rss_key,
+ rss_state->hash_key_size);
+ memcpy(rss_param->key, rss_state->hash_key,
+ rss_state->hash_key_size);
+ }
+
+ if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
+ /* Can't disable RSS through hash flags
+ * if it was enabled by default during init
+ */
+ if (!rss_state->hash_disable)
+ return -EINVAL;
+
+ /* This is for --disable-rss during testpmd launch */
+ rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
+ } else {
+ uint32_t hashinfo = 0;
+
+ /* Can't enable RSS if it was disabled by default during init */
+ if (rss_state->hash_disable)
+ return -EINVAL;
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ hashinfo |= LIO_RSS_HASH_IPV4;
+ rss_state->ip = 1;
+ } else {
+ rss_state->ip = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV4;
+ rss_state->tcp_hash = 1;
+ } else {
+ rss_state->tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+ hashinfo |= LIO_RSS_HASH_IPV6;
+ rss_state->ipv6 = 1;
+ } else {
+ rss_state->ipv6 = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6;
+ rss_state->ipv6_tcp_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+ hashinfo |= LIO_RSS_HASH_IPV6_EX;
+ rss_state->ipv6_ex = 1;
+ } else {
+ rss_state->ipv6_ex = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
+ rss_state->ipv6_tcp_ex_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_ex_hash = 0;
+ }
+
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
+ rss_param->param.hashinfo = hashinfo;
+ }
+
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Add VXLAN destination UDP port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * UDP tunnel configuration
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* Flush added to prevent command failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
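+ /* The add/delete sub-command is carried in the "more" field */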
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Remove VXLAN destination UDP port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * UDP tunnel configuration
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* Flush added to prevent command failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
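+ /* VLAN filters can't be modified when the VLAN is admin-assigned */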
+ if (lio_dev->linfo.vlan_is_admin_assigned)
+ return -EPERM;
+
+ /* Flush added to prevent command failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = on ?
+ LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
+ ctrl_pkt.ncmd.s.param1 = vlan_id;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ return 0;
+}
+
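+/* Count the set bits in a 64-bit word (SWAR population count) */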
+static uint64_t
+lio_hweight64(uint64_t w)
+{
+ uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
+
+ res =
+ (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+}
+
+static int
+lio_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_eth_link link;
+
+ /* Initialize */
+ memset(&link, 0, sizeof(link));
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ /* Return what we found */
+ if (lio_dev->linfo.link.s.link_up == 0) {
+ /* Interface is down */
+ return rte_eth_linkstatus_set(eth_dev, &link);
+ }
+
+ link.link_status = ETH_LINK_UP; /* Interface is up */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ switch (lio_dev->linfo.link.s.speed) {
+ case LIO_LINK_SPEED_10000:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case LIO_LINK_SPEED_25000:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ }
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+/**
+ * \brief Apply updated device flags (promiscuous/allmulticast) on the NIC
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ */
+static void
+lio_change_dev_flag(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* Flush added to prevent command failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
+ ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send change flag message\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "Change dev flag command timed out\n");
+}
+
+static void
+lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+ lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+ LIO_VF_TRUST_MIN_VERSION);
+ return;
+ }
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+ lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+ LIO_VF_TRUST_MIN_VERSION);
+ return;
+ }
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct rte_eth_rss_reta_entry64 reta_conf[8];
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t i;
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
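+ /* No supported hash types requested, so disable RSS */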
+ rss_state->hash_disable = 1;
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+ return;
+ }
+
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = lio_rss_key; /* Default hash key */
+
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+
+ memset(reta_conf, 0, sizeof(reta_conf));
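+ /* Distribute table entries round-robin across the Rx queues */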
+ for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
+ uint8_t q_idx, conf_idx, reta_idx;
+
+ q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
+ i % eth_dev->data->nb_rx_queues : 0);
+ conf_idx = i / RTE_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_RETA_GROUP_SIZE;
+ reta_conf[conf_idx].reta[reta_idx] = q_idx;
+ reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
+ }
+
+ lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);