diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 60ecd16..f4d331c 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -451,6 +451,160 @@ free_p_iov:
 #define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
                                   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
 
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
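+/* The prep helpers below translate driver-side tunnel state
+ * (struct ecore_tunn_update_type) into the VF->PF request TLV: each
+ * tunnel type owns one bit, indexed by its enum ecore_tunn_mode value,
+ * in tun_mode_update_mask ("update this type") and tunn_mode ("enable
+ * it"); the classification byte is copied out unconditionally, and the
+ * extended variant additionally latches an updated UDP port.
+ */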
+static void
+__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+                            struct ecore_tunn_update_type *p_src,
+                            enum ecore_tunn_mode mask, u8 *p_cls)
+{
+       if (p_src->b_update_mode) {
+               p_req->tun_mode_update_mask |= (1 << mask);
+
+               if (p_src->b_mode_enabled)
+                       p_req->tunn_mode |= (1 << mask);
+       }
+
+       *p_cls = p_src->tun_cls;
+}
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+                          struct ecore_tunn_update_type *p_src,
+                          enum ecore_tunn_mode mask, u8 *p_cls,
+                          struct ecore_tunn_update_udp_port *p_port,
+                          u8 *p_update_port, u16 *p_udp_port)
+{
+       if (p_port->b_update_port) {
+               *p_update_port = 1;
+               *p_udp_port = p_port->port;
+       }
+
+       __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
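+/* Flag every tunnel mode that is already enabled (plus both rx/tx
+ * classification settings) for update, so that a subsequent
+ * ecore_vf_pf_tunnel_param_update() re-sends the full configuration,
+ * e.g. across a VF (re)start.
+ */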
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
+{
+       if (p_tun->vxlan.b_mode_enabled)
+               p_tun->vxlan.b_update_mode = true;
+       if (p_tun->l2_geneve.b_mode_enabled)
+               p_tun->l2_geneve.b_update_mode = true;
+       if (p_tun->ip_geneve.b_mode_enabled)
+               p_tun->ip_geneve.b_update_mode = true;
+       if (p_tun->l2_gre.b_mode_enabled)
+               p_tun->l2_gre.b_update_mode = true;
+       if (p_tun->ip_gre.b_mode_enabled)
+               p_tun->ip_gre.b_update_mode = true;
+
+       p_tun->b_update_rx_cls = true;
+       p_tun->b_update_tx_cls = true;
+}
+
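+/* Fold the PF's reply back into the local tunnel state: a mode stays
+ * enabled (and adopts the PF-chosen classification) only if the PF
+ * acknowledged it in tunn_feature_mask; anything else is disabled.
+ */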
+static void
+__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
+                            u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
+                            enum ecore_tunn_mode val)
+{
+       if (feature_mask & (1 << val)) {
+               p_tun->b_mode_enabled = tunn_mode;
+               p_tun->tun_cls = tunn_cls;
+       } else {
+               p_tun->b_mode_enabled = false;
+       }
+}
+
+static void
+ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
+                          struct ecore_tunnel_info *p_tun,
+                          struct pfvf_update_tunn_param_tlv *p_resp)
+{
+       /* Update mode and classes provided by PF */
+       u16 feat_mask = p_resp->tunn_feature_mask;
+
+       __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+                                    p_resp->vxlan_mode, p_resp->vxlan_clss,
+                                    ECORE_MODE_VXLAN_TUNN);
+       __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+                                    p_resp->l2geneve_mode,
+                                    p_resp->l2geneve_clss,
+                                    ECORE_MODE_L2GENEVE_TUNN);
+       __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+                                    p_resp->ipgeneve_mode,
+                                    p_resp->ipgeneve_clss,
+                                    ECORE_MODE_IPGENEVE_TUNN);
+       __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+                                    p_resp->l2gre_mode, p_resp->l2gre_clss,
+                                    ECORE_MODE_L2GRE_TUNN);
+       __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+                                    p_resp->ipgre_mode, p_resp->ipgre_clss,
+                                    ECORE_MODE_IPGRE_TUNN);
+       p_tun->geneve_port.port = p_resp->geneve_udp_port;
+       p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+                  p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+                  p_tun->ip_geneve.b_mode_enabled,
+                  p_tun->l2_gre.b_mode_enabled,
+                  p_tun->ip_gre.b_mode_enabled);
+}
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+                               struct ecore_tunnel_info *p_src)
+{
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_update_tunn_param_tlv *p_resp;
+       struct vfpf_update_tunn_param_tlv *p_req;
+       enum _ecore_status_t rc;
+
+       p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+                                sizeof(*p_req));
+
+       if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+               p_req->update_tun_cls = 1;
+
+       ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
+                                  &p_req->vxlan_clss, &p_src->vxlan_port,
+                                  &p_req->update_vxlan_port,
+                                  &p_req->vxlan_port);
+       ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+                                  ECORE_MODE_L2GENEVE_TUNN,
+                                  &p_req->l2geneve_clss, &p_src->geneve_port,
+                                  &p_req->update_geneve_port,
+                                  &p_req->geneve_port);
+       __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+                                    ECORE_MODE_IPGENEVE_TUNN,
+                                    &p_req->ipgeneve_clss);
+       __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+                                    ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+       __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+                                    ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+       rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+       if (rc)
+               goto exit;
+
+       if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Failed to update tunnel parameters\n");
+               rc = ECORE_INVAL;
+               goto exit;
+       }
+
+       ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+       ecore_vf_pf_req_end(p_hwfn, rc);
+       return rc;
+}
+
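+/* Illustrative call sequence (hypothetical caller, not part of this
+ * change), re-announcing the VF's current tunnel configuration:
+ *
+ *	struct ecore_tunnel_info tunn = p_hwfn->p_dev->tunnel;
+ *
+ *	ecore_vf_set_vf_start_tunn_update_param(&tunn);
+ *	if (ecore_vf_pf_tunnel_param_update(p_hwfn, &tunn) !=
+ *	    ECORE_SUCCESS)
+ *		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ *			   "tunnel config rejected by PF\n");
+ */
+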
 enum _ecore_status_t
 ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
                      struct ecore_queue_cid *p_cid,
@@ -978,6 +1132,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
        if (p_params->rss_params) {
                struct ecore_rss_params *rss_params = p_params->rss_params;
                struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+               int i, table_size;
 
                size = sizeof(struct vfpf_vport_update_rss_tlv);
                p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -999,8 +1154,16 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
                p_rss_tlv->rss_enable = rss_params->rss_enable;
                p_rss_tlv->rss_caps = rss_params->rss_caps;
                p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
-               OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
-                           sizeof(rss_params->rss_ind_table));
+
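+               /* The VF-side indirection table stores queue-cid
+                * pointers, but the TLV carries relative queue ids, so
+                * translate entry by entry; only the entries the PF
+                * will actually read (1 << rss_table_size_log, capped
+                * at T_ETH_INDIRECTION_TABLE_SIZE) are filled in.
+                */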
+               table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
+                                       1 << p_rss_tlv->rss_table_size_log);
+               for (i = 0; i < table_size; i++) {
+                       struct ecore_queue_cid *p_queue;
+
+                       p_queue = rss_params->rss_ind_table[i];
+                       p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+               }
+
                OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
                            sizeof(rss_params->rss_key));
        }
@@ -1122,8 +1285,8 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
        struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_def_resp_tlv *resp;
        struct vfpf_first_tlv *req;
-       enum _ecore_status_t rc;
        u32 size;
+       enum _ecore_status_t rc;
 
        /* clear mailbox and prep first tlv */
        req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
@@ -1261,6 +1424,48 @@ exit:
        return rc;
 }
 
+enum _ecore_status_t
+ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
+                        struct ecore_queue_cid *p_cid)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_update_coalesce *req;
+       struct pfvf_def_resp_tlv *resp;
+       enum _ecore_status_t rc;
+
+       /* clear mailbox and prep header tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
+                              sizeof(*req));
+
+       req->rx_coal = rx_coal;
+       req->tx_coal = tx_coal;
+       req->qid = p_cid->rel.queue_id;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
+                  rx_coal, tx_coal, req->qid);
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+       if (rc != ECORE_SUCCESS)
+               goto exit;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               goto exit;
+
+       p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+       p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+
+exit:
+       ecore_vf_pf_req_end(p_hwfn, rc);
+       return rc;
+}
+
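+/* Illustrative use (hypothetical values, in line with the
+ * rx/tx_coalesce_usecs fields updated above):
+ *
+ *	rc = ecore_vf_pf_set_coalesce(p_hwfn, 64, 128, p_rx_cid);
+ *
+ * where p_rx_cid is the cid of the queue whose coalescing parameters
+ * should change.
+ */
+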
 u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
                           u16               sb_id)
 {
@@ -1377,6 +1582,12 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
        *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
 }
 
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+                          u8 *num_txqs)
+{
+       *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
 void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
 {
        OSAL_MEMCPY(port_mac,
@@ -1447,6 +1658,18 @@ bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
        return true;
 }
 
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+                                    u16 *p_vxlan_port,
+                                    u16 *p_geneve_port)
+{
+       struct ecore_bulletin_content *p_bulletin;
+
+       p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+       *p_vxlan_port = p_bulletin->vxlan_udp_port;
+       *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
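+/* These ports come from the PF via the bulletin board, so a VF driver
+ * would typically poll this (e.g. alongside the forced-MAC check
+ * above) and reprogram its UDP-port-based tunnel classification when
+ * the values change.
+ */
+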
 bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
 {
        struct ecore_bulletin_content *bulletin;