1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <stdlib.h>
9
10 #include "bnxt.h"
11 #include "tf_core.h"
12 #include "tf_session.h"
13 #include "tfp.h"
14
15 #include "tf_msg_common.h"
16 #include "tf_msg.h"
17 #include "hsi_struct_def_dpdk.h"
18 #include "hwrm_tf.h"
19 #include "tf_em.h"
20
21 /**
22  * Endian converts min and max values from the HW response to the query
23  */
24 #define TF_HW_RESP_TO_QUERY(query, index, response, element) do {            \
25         (query)->hw_query[index].min =                                       \
26                 tfp_le_to_cpu_16(response. element ## _min);                 \
27         (query)->hw_query[index].max =                                       \
28                 tfp_le_to_cpu_16(response. element ## _max);                 \
29 } while (0)
30
31 /**
32  * Endian converts the number of entries from the alloc to the request
33  */
34 #define TF_HW_ALLOC_TO_REQ(alloc, index, request, element)                   \
35         (request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
36
37 /**
38  * Endian converts the start and stride value from the free to the request
39  */
40 #define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do {            \
41         request.element ## _start =                                          \
42                 tfp_cpu_to_le_16(hw_entry[index].start);                     \
43         request.element ## _stride =                                         \
44                 tfp_cpu_to_le_16(hw_entry[index].stride);                    \
45 } while (0)
46
47 /**
48  * Endian converts the start and stride from the HW response to the
49  * alloc
50  */
51 #define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do {         \
52         hw_entry[index].start =                                              \
53                 tfp_le_to_cpu_16(response.element ## _start);                \
54         hw_entry[index].stride =                                             \
55                 tfp_le_to_cpu_16(response.element ## _stride);               \
56 } while (0)
57
58 /**
59  * Endian converts min and max values from the SRAM response to the
60  * query
61  */
62 #define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do {          \
63         (query)->sram_query[index].min =                                     \
64                 tfp_le_to_cpu_16(response.element ## _min);                  \
65         (query)->sram_query[index].max =                                     \
66                 tfp_le_to_cpu_16(response.element ## _max);                  \
67 } while (0)
68
69 /**
70  * Endian converts the number of entries from the action (alloc) to
71  * the request
72  */
73 #define TF_SRAM_ALLOC_TO_REQ(action, index, request, element)                \
74         (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
75
76 /**
77  * Endian converts the start and stride value from the free to the request
78  */
79 #define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do {        \
80         request.element ## _start =                                          \
81                 tfp_cpu_to_le_16(sram_entry[index].start);                   \
82         request.element ## _stride =                                         \
83                 tfp_cpu_to_le_16(sram_entry[index].stride);                  \
84 } while (0)
85
86 /**
87  * Endian converts the start and stride from the HW response to the
88  * alloc
89  */
90 #define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do {     \
91         sram_entry[index].start =                                            \
92                 tfp_le_to_cpu_16(response.element ## _start);                \
93         sram_entry[index].stride =                                           \
94                 tfp_le_to_cpu_16(response.element ## _stride);               \
95 } while (0)
96
97 /**
98  * Maximum number of bytes of data that can be carried inline in a regular HWRM message
99  */
100 #define TF_PCI_BUF_SIZE_MAX 88
101
102 /**
103  * DMA buffer descriptor used when the data is bigger than TF_PCI_BUF_SIZE_MAX
104  */
105 struct tf_msg_dma_buf {
106         void *va_addr;
107         uint64_t pa_addr;
108 };
109
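/**
 * Maps a TCAM table type to the HWRM device data type carried in the
 * TCAM set/free requests. VEB, SP and CT rule TCAMs are not supported
 * and return -EOPNOTSUPP.
 */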
110 static int
111 tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type,
112                    uint32_t *hwrm_type)
113 {
114         int rc = 0;
115
116         switch (tcam_type) {
117         case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
118                 *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY;
119                 break;
120         case TF_TCAM_TBL_TYPE_PROF_TCAM:
121                 *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY;
122                 break;
123         case TF_TCAM_TBL_TYPE_WC_TCAM:
124                 *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY;
125                 break;
126         case TF_TCAM_TBL_TYPE_VEB_TCAM:
127                 rc = -EOPNOTSUPP;
128                 break;
129         case TF_TCAM_TBL_TYPE_SP_TCAM:
130                 rc = -EOPNOTSUPP;
131                 break;
132         case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
133                 rc = -EOPNOTSUPP;
134                 break;
135         default:
136                 rc = -EOPNOTSUPP;
137                 break;
138         }
139
140         return rc;
141 }
142
143 /**
144  * Sends session open request to TF Firmware
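 *
 * [in] tfp - Pointer to TF handle
 * [in] ctrl_chan_name - Control channel name used as the session name
 * [out] fw_session_id - Firmware session id assigned by the firmware
 *
 * Returns 0 on success, negative errno on failure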
145  */
146 int
147 tf_msg_session_open(struct tf *tfp,
148                     char *ctrl_chan_name,
149                     uint8_t *fw_session_id)
150 {
151         int rc;
152         struct hwrm_tf_session_open_input req = { 0 };
153         struct hwrm_tf_session_open_output resp = { 0 };
154         struct tfp_send_msg_parms parms = { 0 };
155
156         /* Populate the request */
157         memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
158
159         parms.tf_type = HWRM_TF_SESSION_OPEN;
160         parms.req_data = (uint32_t *)&req;
161         parms.req_size = sizeof(req);
162         parms.resp_data = (uint32_t *)&resp;
163         parms.resp_size = sizeof(resp);
164         parms.mailbox = TF_KONG_MB;
165
166         rc = tfp_send_msg_direct(tfp,
167                                  &parms);
168         if (rc)
169                 return rc;
170
171         *fw_session_id = resp.fw_session_id;
172
173         return rc;
174 }
175
176 /**
177  * Sends session attach request to TF Firmware
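 *
 * Attach is not yet implemented; the request is rejected and -1 is
 * returned unconditionally.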
178  */
179 int
180 tf_msg_session_attach(struct tf *tfp __rte_unused,
181                       char *ctrl_chan_name __rte_unused,
182                       uint8_t tf_fw_session_id __rte_unused)
183 {
184         return -1;
185 }
186
187 /**
188  * Sends session close request to TF Firmware
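 *
 * [in] tfp - Pointer to TF handle holding the session to close
 *
 * Returns 0 on success, negative errno on failure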
189  */
190 int
191 tf_msg_session_close(struct tf *tfp)
192 {
193         int rc;
194         struct hwrm_tf_session_close_input req = { 0 };
195         struct hwrm_tf_session_close_output resp = { 0 };
196         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
197         struct tfp_send_msg_parms parms = { 0 };
198
199         /* Populate the request */
200         req.fw_session_id =
201                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
202
203         parms.tf_type = HWRM_TF_SESSION_CLOSE;
204         parms.req_data = (uint32_t *)&req;
205         parms.req_size = sizeof(req);
206         parms.resp_data = (uint32_t *)&resp;
207         parms.resp_size = sizeof(resp);
208         parms.mailbox = TF_KONG_MB;
209
210         rc = tfp_send_msg_direct(tfp,
211                                  &parms);
212         return rc;
213 }
214
215 /**
216  * Sends session query config request to TF Firmware
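 *
 * [in] tfp - Pointer to TF handle holding the session to query
 *
 * Returns 0 on success, negative errno on failure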
217  */
218 int
219 tf_msg_session_qcfg(struct tf *tfp)
220 {
221         int rc;
222         struct hwrm_tf_session_qcfg_input  req = { 0 };
223         struct hwrm_tf_session_qcfg_output resp = { 0 };
224         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
225         struct tfp_send_msg_parms parms = { 0 };
226
227         /* Populate the request */
228         req.fw_session_id =
229                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
230
231         parms.tf_type = HWRM_TF_SESSION_QCFG;
232         parms.req_data = (uint32_t *)&req;
233         parms.req_size = sizeof(req);
234         parms.resp_data = (uint32_t *)&resp;
235         parms.resp_size = sizeof(resp);
236         parms.mailbox = TF_KONG_MB;
237
238         rc = tfp_send_msg_direct(tfp,
239                                  &parms);
240         return rc;
241 }
242
243 /**
244  * Sends session HW resource query capability request to TF Firmware
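 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [out] query - Filled with the min/max of each HW resource type
 *
 * Returns 0 on success, negative errno or firmware response code on failure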
245  */
246 int
247 tf_msg_session_hw_resc_qcaps(struct tf *tfp,
248                              enum tf_dir dir,
249                              struct tf_rm_hw_query *query)
250 {
251         int rc;
252         struct tfp_send_msg_parms parms = { 0 };
253         struct tf_session_hw_resc_qcaps_input req = { 0 };
254         struct tf_session_hw_resc_qcaps_output resp = { 0 };
255         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
256
257         memset(query, 0, sizeof(*query));
258
259         /* Populate the request */
260         req.fw_session_id =
261                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
262         req.flags = tfp_cpu_to_le_16(dir);
263
264         MSG_PREP(parms,
265                  TF_KONG_MB,
266                  HWRM_TF,
267                  HWRM_TFT_SESSION_HW_RESC_QCAPS,
268                  req,
269                  resp);
270
271         rc = tfp_send_msg_tunneled(tfp, &parms);
272         if (rc)
273                 return rc;
274
275         /* Process the response */
276         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
277                             l2_ctx_tcam_entries);
278         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
279                             prof_func);
280         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
281                             prof_tcam_entries);
282         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
283                             em_prof_id);
284         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
285                             em_record_entries);
286         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
287                             wc_tcam_prof_id);
288         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
289                             wc_tcam_entries);
290         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
291                             meter_profiles);
292         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
293                             resp, meter_inst);
294         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
295                             mirrors);
296         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
297                             upar);
298         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
299                             sp_tcam_entries);
300         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
301                             l2_func);
302         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
303                             flex_key_templ);
304         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
305                             tbl_scope);
306         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
307                             epoch0_entries);
308         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
309                             epoch1_entries);
310         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
311                             metadata);
312         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
313                             ct_state);
314         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
315                             range_prof);
316         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
317                             range_entries);
318         TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
319                             lag_tbl_entries);
320
321         return tfp_le_to_cpu_32(parms.tf_resp_code);
322 }
323
324 /**
325  * Sends session HW resource allocation request to TF Firmware
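 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] hw_alloc - Number of entries requested per HW resource type
 * [out] hw_entry - Start/stride of each allocated HW resource range
 *
 * Returns 0 on success, negative errno or firmware response code on failure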
326  */
327 int
328 tf_msg_session_hw_resc_alloc(struct tf *tfp,
329                              enum tf_dir dir,
330                              struct tf_rm_hw_alloc *hw_alloc,
331                              struct tf_rm_entry *hw_entry)
332 {
333         int rc;
334         struct tfp_send_msg_parms parms = { 0 };
335         struct tf_session_hw_resc_alloc_input req = { 0 };
336         struct tf_session_hw_resc_alloc_output resp = { 0 };
337         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
338
339         memset(hw_entry, 0, sizeof(*hw_entry));
340
341         /* Populate the request */
342         req.fw_session_id =
343                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
344         req.flags = tfp_cpu_to_le_16(dir);
345
346         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
347                            l2_ctx_tcam_entries);
348         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
349                            prof_func_entries);
350         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
351                            prof_tcam_entries);
352         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
353                            em_prof_id);
354         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
355                            em_record_entries);
356         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
357                            wc_tcam_prof_id);
358         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
359                            wc_tcam_entries);
360         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
361                            meter_profiles);
362         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
363                            meter_inst);
364         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
365                            mirrors);
366         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
367                            upar);
368         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
369                            sp_tcam_entries);
370         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
371                            l2_func);
372         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
373                            flex_key_templ);
374         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
375                            tbl_scope);
376         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
377                            epoch0_entries);
378         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
379                            epoch1_entries);
380         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
381                            metadata);
382         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
383                            ct_state);
384         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
385                            range_prof);
386         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
387                            range_entries);
388         TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
389                            lag_tbl_entries);
390
391         MSG_PREP(parms,
392                  TF_KONG_MB,
393                  HWRM_TF,
394                  HWRM_TFT_SESSION_HW_RESC_ALLOC,
395                  req,
396                  resp);
397
398         rc = tfp_send_msg_tunneled(tfp, &parms);
399         if (rc)
400                 return rc;
401
402         /* Process the response */
403         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
404                             l2_ctx_tcam_entries);
405         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
406                             prof_func);
407         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
408                             prof_tcam_entries);
409         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
410                             em_prof_id);
411         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
412                             em_record_entries);
413         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
414                             wc_tcam_prof_id);
415         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
416                             wc_tcam_entries);
417         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
418                             meter_profiles);
419         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
420                             meter_inst);
421         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
422                             mirrors);
423         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
424                             upar);
425         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
426                             sp_tcam_entries);
427         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
428                             l2_func);
429         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
430                             flex_key_templ);
431         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
432                             tbl_scope);
433         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
434                             epoch0_entries);
435         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
436                             epoch1_entries);
437         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
438                             metadata);
439         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
440                             ct_state);
441         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
442                             range_prof);
443         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
444                             range_entries);
445         TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
446                             lag_tbl_entries);
447
448         return tfp_le_to_cpu_32(parms.tf_resp_code);
449 }
450
451 /**
452  * Sends session HW resource free request to TF Firmware
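 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] hw_entry - Start/stride of each HW resource range to free
 *
 * Returns 0 on success, negative errno or firmware response code on failure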
453  */
454 int
455 tf_msg_session_hw_resc_free(struct tf *tfp,
456                             enum tf_dir dir,
457                             struct tf_rm_entry *hw_entry)
458 {
459         int rc;
460         struct tfp_send_msg_parms parms = { 0 };
461         struct tf_session_hw_resc_free_input req = { 0 };
462         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
463
466         /* Populate the request */
467         req.fw_session_id =
468                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
469         req.flags = tfp_cpu_to_le_16(dir);
470
471         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
472                           l2_ctx_tcam_entries);
473         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
474                           prof_func);
475         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
476                           prof_tcam_entries);
477         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
478                           em_prof_id);
479         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
480                           em_record_entries);
481         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
482                           wc_tcam_prof_id);
483         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
484                           wc_tcam_entries);
485         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
486                           meter_profiles);
487         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
488                           meter_inst);
489         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
490                           mirrors);
491         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
492                           upar);
493         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
494                           sp_tcam_entries);
495         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
496                           l2_func);
497         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
498                           flex_key_templ);
499         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
500                           tbl_scope);
501         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
502                           epoch0_entries);
503         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
504                           epoch1_entries);
505         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
506                           metadata);
507         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
508                           ct_state);
509         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
510                           range_prof);
511         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
512                           range_entries);
513         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
514                           lag_tbl_entries);
515
516         MSG_PREP_NO_RESP(parms,
517                          TF_KONG_MB,
518                          HWRM_TF,
519                          HWRM_TFT_SESSION_HW_RESC_FREE,
520                          req);
521
522         rc = tfp_send_msg_tunneled(tfp, &parms);
523         if (rc)
524                 return rc;
525
526         return tfp_le_to_cpu_32(parms.tf_resp_code);
527 }
528
529 /**
530  * Sends session HW resource flush request to TF Firmware
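 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] hw_entry - Start/stride of each HW resource range to flush
 *
 * Returns 0 on success, negative errno or firmware response code on failure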
531  */
532 int
533 tf_msg_session_hw_resc_flush(struct tf *tfp,
534                              enum tf_dir dir,
535                              struct tf_rm_entry *hw_entry)
536 {
537         int rc;
538         struct tfp_send_msg_parms parms = { 0 };
539         struct tf_session_hw_resc_free_input req = { 0 };
540         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
541
542         /* Populate the request */
543         req.fw_session_id =
544                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
545         req.flags = tfp_cpu_to_le_16(dir);
546
547         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
548                           l2_ctx_tcam_entries);
549         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
550                           prof_func);
551         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
552                           prof_tcam_entries);
553         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
554                           em_prof_id);
555         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
556                           em_record_entries);
557         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
558                           wc_tcam_prof_id);
559         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
560                           wc_tcam_entries);
561         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
562                           meter_profiles);
563         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
564                           meter_inst);
565         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
566                           mirrors);
567         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
568                           upar);
569         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
570                           sp_tcam_entries);
571         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
572                           l2_func);
573         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
574                           flex_key_templ);
575         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
576                           tbl_scope);
577         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
578                           epoch0_entries);
579         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
580                           epoch1_entries);
581         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
582                           metadata);
583         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
584                           ct_state);
585         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
586                           range_prof);
587         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
588                           range_entries);
589         TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
590                           lag_tbl_entries);
591
592         MSG_PREP_NO_RESP(parms,
593                          TF_KONG_MB,
594                          TF_TYPE_TRUFLOW,
595                          HWRM_TFT_SESSION_HW_RESC_FLUSH,
596                          req);
597
598         rc = tfp_send_msg_tunneled(tfp, &parms);
599         if (rc)
600                 return rc;
601
602         return tfp_le_to_cpu_32(parms.tf_resp_code);
603 }
604
605 /**
606  * Sends session SRAM resource query capability request to TF Firmware
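 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [out] query - Filled with the min/max of each SRAM resource type
 *
 * Returns 0 on success, negative errno or firmware response code on failure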
607  */
608 int
609 tf_msg_session_sram_resc_qcaps(struct tf *tfp,
610                                enum tf_dir dir,
611                                struct tf_rm_sram_query *query)
612 {
613         int rc;
614         struct tfp_send_msg_parms parms = { 0 };
615         struct tf_session_sram_resc_qcaps_input req = { 0 };
616         struct tf_session_sram_resc_qcaps_output resp = { 0 };
617         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
618
619         /* Populate the request */
620         req.fw_session_id =
621                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
622         req.flags = tfp_cpu_to_le_16(dir);
623
624         MSG_PREP(parms,
625                  TF_KONG_MB,
626                  HWRM_TF,
627                  HWRM_TFT_SESSION_SRAM_RESC_QCAPS,
628                  req,
629                  resp);
630
631         rc = tfp_send_msg_tunneled(tfp, &parms);
632         if (rc)
633                 return rc;
634
635         /* Process the response */
636         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp,
637                               full_action);
638         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp,
639                               mcg);
640         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
641                               encap_8b);
642         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
643                               encap_16b);
644         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
645                               encap_64b);
646         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
647                               sp_smac);
648         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp,
649                               sp_smac_ipv4);
650         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp,
651                               sp_smac_ipv6);
652         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
653                               counter_64b);
654         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
655                               nat_sport);
656         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
657                               nat_dport);
658         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
659                               nat_s_ipv4);
660         TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
661                               nat_d_ipv4);
662
663         return tfp_le_to_cpu_32(parms.tf_resp_code);
664 }
665
666 /**
667  * Sends session SRAM resource allocation request to TF Firmware
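 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] sram_alloc - Number of entries requested per SRAM resource type
 * [out] sram_entry - Start/stride of each allocated SRAM resource range
 *
 * Returns 0 on success, negative errno or firmware response code on failure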
668  */
669 int
670 tf_msg_session_sram_resc_alloc(struct tf *tfp,
671                                enum tf_dir dir,
672                                struct tf_rm_sram_alloc *sram_alloc,
673                                struct tf_rm_entry *sram_entry)
674 {
675         int rc;
676         struct tfp_send_msg_parms parms = { 0 };
677         struct tf_session_sram_resc_alloc_input req = { 0 };
678         struct tf_session_sram_resc_alloc_output resp;
679         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
680
681         memset(&resp, 0, sizeof(resp));
682
683         /* Populate the request */
684         req.fw_session_id =
685                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
686         req.flags = tfp_cpu_to_le_16(dir);
687
688         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
689                              full_action);
690         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req,
691                              mcg);
692         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
693                              encap_8b);
694         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
695                              encap_16b);
696         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
697                              encap_64b);
698         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req,
699                              sp_smac);
700         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
701                              req, sp_smac_ipv4);
702         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
703                              req, sp_smac_ipv6);
704         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B,
705                              req, counter_64b);
706         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
707                              nat_sport);
708         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
709                              nat_dport);
710         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
711                              nat_s_ipv4);
712         TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
713                              nat_d_ipv4);
714
715         MSG_PREP(parms,
716                  TF_KONG_MB,
717                  HWRM_TF,
718                  HWRM_TFT_SESSION_SRAM_RESC_ALLOC,
719                  req,
720                  resp);
721
722         rc = tfp_send_msg_tunneled(tfp, &parms);
723         if (rc)
724                 return rc;
725
726         /* Process the response */
727         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION,
728                               resp, full_action);
729         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp,
730                               mcg);
731         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp,
732                               encap_8b);
733         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp,
734                               encap_16b);
735         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp,
736                               encap_64b);
737         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp,
738                               sp_smac);
739         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
740                               resp, sp_smac_ipv4);
741         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
742                               resp, sp_smac_ipv6);
743         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp,
744                               counter_64b);
745         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp,
746                               nat_sport);
747         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp,
748                               nat_dport);
749         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp,
750                               nat_s_ipv4);
751         TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp,
752                               nat_d_ipv4);
753
754         return tfp_le_to_cpu_32(parms.tf_resp_code);
755 }
756
757 /**
758  * Sends session SRAM resource free request to TF Firmware
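 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] sram_entry - Start/stride of each SRAM resource range to free
 *
 * Returns 0 on success, negative errno or firmware response code on failure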
759  */
760 int
761 tf_msg_session_sram_resc_free(struct tf *tfp,
762                               enum tf_dir dir,
763                               struct tf_rm_entry *sram_entry)
764 {
765         int rc;
766         struct tfp_send_msg_parms parms = { 0 };
767         struct tf_session_sram_resc_free_input req = { 0 };
768         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
769
770         /* Populate the request */
771         req.fw_session_id =
772                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
773         req.flags = tfp_cpu_to_le_16(dir);
774
775         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
776                             full_action);
777         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
778                             mcg);
779         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
780                             encap_8b);
781         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
782                             encap_16b);
783         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
784                             encap_64b);
785         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
786                             sp_smac);
787         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
788                             sp_smac_ipv4);
789         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
790                             sp_smac_ipv6);
791         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
792                             counter_64b);
793         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
794                             nat_sport);
795         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
796                             nat_dport);
797         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
798                             nat_s_ipv4);
799         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
800                             nat_d_ipv4);
801
802         MSG_PREP_NO_RESP(parms,
803                          TF_KONG_MB,
804                          HWRM_TF,
805                          HWRM_TFT_SESSION_SRAM_RESC_FREE,
806                          req);
807
808         rc = tfp_send_msg_tunneled(tfp, &parms);
809         if (rc)
810                 return rc;
811
812         return tfp_le_to_cpu_32(parms.tf_resp_code);
813 }
814
815 /**
816  * Sends session SRAM resource flush request to TF Firmware
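 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] sram_entry - Start/stride of each SRAM resource range to flush
 *
 * Returns 0 on success, negative errno or firmware response code on failure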
817  */
818 int
819 tf_msg_session_sram_resc_flush(struct tf *tfp,
820                                enum tf_dir dir,
821                                struct tf_rm_entry *sram_entry)
822 {
823         int rc;
824         struct tfp_send_msg_parms parms = { 0 };
825         struct tf_session_sram_resc_free_input req = { 0 };
826         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
827
828         /* Populate the request */
829         req.fw_session_id =
830                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
831         req.flags = tfp_cpu_to_le_16(dir);
832
833         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req,
834                             full_action);
835         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req,
836                             mcg);
837         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req,
838                             encap_8b);
839         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req,
840                             encap_16b);
841         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req,
842                             encap_64b);
843         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req,
844                             sp_smac);
845         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req,
846                             sp_smac_ipv4);
847         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req,
848                             sp_smac_ipv6);
849         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req,
850                             counter_64b);
851         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req,
852                             nat_sport);
853         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req,
854                             nat_dport);
855         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req,
856                             nat_s_ipv4);
857         TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req,
858                             nat_d_ipv4);
859
860         MSG_PREP_NO_RESP(parms,
861                          TF_KONG_MB,
862                          TF_TYPE_TRUFLOW,
863                          HWRM_TFT_SESSION_SRAM_RESC_FLUSH,
864                          req);
865
866         rc = tfp_send_msg_tunneled(tfp, &parms);
867         if (rc)
868                 return rc;
869
870         return tfp_le_to_cpu_32(parms.tf_resp_code);
871 }
872
873 /**
874  * Sends EM mem register request to Firmware
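 *
 * [in] tfp - Pointer to TF handle
 * [in] page_lvl - Page table level of the registered memory
 * [in] page_size - Page size of the registered memory
 * [in] dma_addr - DMA address of the page directory
 * [out] ctx_id - Context id assigned by the firmware
 *
 * Returns 0 on success, negative errno on failure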
875  */
876 int tf_msg_em_mem_rgtr(struct tf *tfp,
877                        int           page_lvl,
878                        int           page_size,
879                        uint64_t      dma_addr,
880                        uint16_t     *ctx_id)
881 {
882         int rc;
883         struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };
884         struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };
885         struct tfp_send_msg_parms parms = { 0 };
886
887         req.page_level = page_lvl;
888         req.page_size = page_size;
889         req.page_dir = tfp_cpu_to_le_64(dma_addr);
890
891         parms.tf_type = HWRM_TF_CTXT_MEM_RGTR;
892         parms.req_data = (uint32_t *)&req;
893         parms.req_size = sizeof(req);
894         parms.resp_data = (uint32_t *)&resp;
895         parms.resp_size = sizeof(resp);
896         parms.mailbox = TF_KONG_MB;
897
898         rc = tfp_send_msg_direct(tfp,
899                                  &parms);
900         if (rc)
901                 return rc;
902
903         *ctx_id = tfp_le_to_cpu_16(resp.ctx_id);
904
905         return rc;
906 }
907
908 /**
909  * Sends EM mem unregister request to Firmware
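 *
 * [in] tfp - Pointer to TF handle
 * [in] ctx_id - Context id returned by a previous tf_msg_em_mem_rgtr()
 *
 * Returns 0 on success, negative errno on failure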
910  */
911 int tf_msg_em_mem_unrgtr(struct tf *tfp,
912                          uint16_t  *ctx_id)
913 {
914         int rc;
915         struct hwrm_tf_ctxt_mem_unrgtr_input req = {0};
916         struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};
917         struct tfp_send_msg_parms parms = { 0 };
918
919         req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
920
921         parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
922         parms.req_data = (uint32_t *)&req;
923         parms.req_size = sizeof(req);
924         parms.resp_data = (uint32_t *)&resp;
925         parms.resp_size = sizeof(resp);
926         parms.mailbox = TF_KONG_MB;
927
928         rc = tfp_send_msg_direct(tfp,
929                                  &parms);
930         return rc;
931 }
932
933 /**
934  * Sends EM qcaps request to Firmware
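 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [out] em_caps - Filled with the EM capabilities reported by the firmware
 *
 * Returns 0 on success, negative errno on failure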
935  */
936 int tf_msg_em_qcaps(struct tf *tfp,
937                     int dir,
938                     struct tf_em_caps *em_caps)
939 {
940         int rc;
941         struct hwrm_tf_ext_em_qcaps_input  req = {0};
942         struct hwrm_tf_ext_em_qcaps_output resp = { 0 };
943         uint32_t             flags;
944         struct tfp_send_msg_parms parms = { 0 };
945
946         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :
947                  HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);
948         req.flags = tfp_cpu_to_le_32(flags);
949
950         parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
951         parms.req_data = (uint32_t *)&req;
952         parms.req_size = sizeof(req);
953         parms.resp_data = (uint32_t *)&resp;
954         parms.resp_size = sizeof(resp);
955         parms.mailbox = TF_KONG_MB;
956
957         rc = tfp_send_msg_direct(tfp,
958                                  &parms);
959         if (rc)
960                 return rc;
961
962         em_caps->supported = tfp_le_to_cpu_32(resp.supported);
963         em_caps->max_entries_supported =
964                 tfp_le_to_cpu_32(resp.max_entries_supported);
965         em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);
966         em_caps->record_entry_size =
967                 tfp_le_to_cpu_16(resp.record_entry_size);
968         em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);
969
970         return rc;
971 }
972
973 /**
974  * Sends EM config request to Firmware
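 *
 * [in] tfp - Pointer to TF handle
 * [in] num_entries - Number of EM entries to configure
 * [in] key0_ctx_id - Context id of the key0 table memory
 * [in] key1_ctx_id - Context id of the key1 table memory
 * [in] record_ctx_id - Context id of the record table memory
 * [in] efc_ctx_id - Context id of the EFC table memory
 * [in] flush_interval - EM flush interval
 * [in] dir - Receive or transmit direction
 *
 * Returns 0 on success, negative errno on failure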
975  */
976 int tf_msg_em_cfg(struct tf *tfp,
977                   uint32_t   num_entries,
978                   uint16_t   key0_ctx_id,
979                   uint16_t   key1_ctx_id,
980                   uint16_t   record_ctx_id,
981                   uint16_t   efc_ctx_id,
982                   uint8_t    flush_interval,
983                   int        dir)
984 {
985         int rc;
986         struct hwrm_tf_ext_em_cfg_input  req = {0};
987         struct hwrm_tf_ext_em_cfg_output resp = {0};
988         uint32_t flags;
989         struct tfp_send_msg_parms parms = { 0 };
990
991         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
992                  HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
993         flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
994
995         req.flags = tfp_cpu_to_le_32(flags);
996         req.num_entries = tfp_cpu_to_le_32(num_entries);
997
998         req.flush_interval = flush_interval;
999
1000         req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);
1001         req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);
1002         req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);
1003         req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);
1004
1005         parms.tf_type = HWRM_TF_EXT_EM_CFG;
1006         parms.req_data = (uint32_t *)&req;
1007         parms.req_size = sizeof(req);
1008         parms.resp_data = (uint32_t *)&resp;
1009         parms.resp_size = sizeof(resp);
1010         parms.mailbox = TF_KONG_MB;
1011
1012         rc = tfp_send_msg_direct(tfp,
1013                                  &parms);
1014         return rc;
1015 }
1016
1017 /**
1018  * Sends EM internal insert request to Firmware
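 *
 * [in] tfp - Pointer to TF handle
 * [in] em_parms - Insert parameters holding the key and 64B result record
 * [in/out] rptr_index - Requested record index in, firmware record index out
 * [out] rptr_entry - Record pointer entry returned by the firmware
 * [out] num_of_entries - Number of entries consumed by the insert
 *
 * Returns 0 on success, negative errno or firmware response code on failure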
1019  */
1020 int tf_msg_insert_em_internal_entry(struct tf *tfp,
1021                                 struct tf_insert_em_entry_parms *em_parms,
1022                                 uint16_t *rptr_index,
1023                                 uint8_t *rptr_entry,
1024                                 uint8_t *num_of_entries)
1025 {
1026         int rc;
1027         struct tfp_send_msg_parms parms = { 0 };
1028         struct tf_em_internal_insert_input req = { 0 };
1029         struct tf_em_internal_insert_output resp = { 0 };
1030         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1031         struct tf_em_64b_entry *em_result =
1032                 (struct tf_em_64b_entry *)em_parms->em_record;
1033
1034         req.fw_session_id =
1035                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1036         memcpy(req.em_key, em_parms->key, ((em_parms->key_sz_in_bits + 7) / 8));
1037         req.flags = tfp_cpu_to_le_16(em_parms->dir);
1038         req.strength = (em_result->hdr.word1 & TF_LKUP_RECORD_STRENGTH_MASK) >>
1039                 TF_LKUP_RECORD_STRENGTH_SHIFT;
1040         req.em_key_bitlen = em_parms->key_sz_in_bits;
1041         req.action_ptr = em_result->hdr.pointer;
1042         req.em_record_idx = *rptr_index;
1043
1044         MSG_PREP(parms,
1045                  TF_KONG_MB,
1046                  HWRM_TF,
1047                  HWRM_TFT_EM_RULE_INSERT,
1048                  req,
1049                  resp);
1050
1051         rc = tfp_send_msg_tunneled(tfp, &parms);
1052         if (rc)
1053                 return rc;
1054
1055         *rptr_entry = resp.rptr_entry;
1056         *rptr_index = resp.rptr_index;
1057         *num_of_entries = resp.num_of_entries;
1058
1059         return tfp_le_to_cpu_32(parms.tf_resp_code);
1060 }
1061
1062 /**
1063  * Sends EM entry delete request to Firmware
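 *
 * [in] tfp - Pointer to TF handle
 * [in/out] em_parms - Delete parameters; the freed record index is returned
 *          in em_parms->index
 *
 * Returns 0 on success, negative errno or firmware response code on failure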
1064  */
1065 int tf_msg_delete_em_entry(struct tf *tfp,
1066                            struct tf_delete_em_entry_parms *em_parms)
1067 {
1068         int rc;
1069         struct tfp_send_msg_parms parms = { 0 };
1070         struct tf_em_internal_delete_input req = { 0 };
1071         struct tf_em_internal_delete_output resp = { 0 };
1072         struct tf_session *tfs =
1073                 (struct tf_session *)(tfp->session->core_data);
1074
1075         req.tf_session_id =
1076                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1077         req.flags = tfp_cpu_to_le_16(em_parms->dir);
1078         req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
1079
1080         MSG_PREP(parms,
1081                  TF_KONG_MB,
1082                  HWRM_TF,
1083                  HWRM_TFT_EM_RULE_DELETE,
1084                  req,
1085                  resp);
1086
1087         rc = tfp_send_msg_tunneled(tfp, &parms);
1088         if (rc)
1089                 return rc;
1090
1091         em_parms->index = tfp_le_to_cpu_16(resp.em_index);
1092
1093         return tfp_le_to_cpu_32(parms.tf_resp_code);
1094 }
1095
1096 /**
1097  * Sends EM operation request to Firmware
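 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] op - EM operation code to execute
 *
 * Returns 0 on success, negative errno on failure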
1098  */
1099 int tf_msg_em_op(struct tf *tfp,
1100                  int dir,
1101                  uint16_t op)
1102 {
1103         int rc;
1104         struct hwrm_tf_ext_em_op_input req = {0};
1105         struct hwrm_tf_ext_em_op_output resp = {0};
1106         uint32_t flags;
1107         struct tfp_send_msg_parms parms = { 0 };
1108
1109         flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
1110                  HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
1111         req.flags = tfp_cpu_to_le_32(flags);
1112         req.op = tfp_cpu_to_le_16(op);
1113
1114         parms.tf_type = HWRM_TF_EXT_EM_OP;
1115         parms.req_data = (uint32_t *)&req;
1116         parms.req_size = sizeof(req);
1117         parms.resp_data = (uint32_t *)&resp;
1118         parms.resp_size = sizeof(resp);
1119         parms.mailbox = TF_KONG_MB;
1120
1121         rc = tfp_send_msg_direct(tfp,
1122                                  &parms);
1123         return rc;
1124 }
1125
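/**
 * Sends a table type set request to TF Firmware
 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] type - Table type being written
 * [in] size - Size of the data to write, in bytes
 * [in] data - Data written at the given index
 * [in] index - Index of the table entry to set
 *
 * Returns 0 on success, negative errno or firmware response code on failure
 */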
1126 int
1127 tf_msg_set_tbl_entry(struct tf *tfp,
1128                      enum tf_dir dir,
1129                      enum tf_tbl_type type,
1130                      uint16_t size,
1131                      uint8_t *data,
1132                      uint32_t index)
1133 {
1134         int rc;
1135         struct tfp_send_msg_parms parms = { 0 };
1136         struct tf_tbl_type_set_input req = { 0 };
1137         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1138
1139         /* Populate the request */
1140         req.fw_session_id =
1141                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1142         req.flags = tfp_cpu_to_le_16(dir);
1143         req.type = tfp_cpu_to_le_32(type);
1144         req.size = tfp_cpu_to_le_16(size);
1145         req.index = tfp_cpu_to_le_32(index);
1146
1147         tfp_memcpy(&req.data,
1148                    data,
1149                    size);
1150
1151         MSG_PREP_NO_RESP(parms,
1152                          TF_KONG_MB,
1153                          HWRM_TF,
1154                          HWRM_TFT_TBL_TYPE_SET,
1155                          req);
1156
1157         rc = tfp_send_msg_tunneled(tfp, &parms);
1158         if (rc)
1159                 return rc;
1160
1161         return tfp_le_to_cpu_32(parms.tf_resp_code);
1162 }
1163
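/**
 * Sends a table type get request to TF Firmware
 *
 * [in] tfp - Pointer to TF handle
 * [in] dir - Receive or transmit direction
 * [in] type - Table type being read
 * [in] size - Size of the caller's data buffer, in bytes
 * [out] data - Buffer receiving the table entry contents
 * [in] index - Index of the table entry to get
 *
 * Returns 0 on success, -EINVAL if the firmware returned less data than
 * requested, negative errno or firmware response code otherwise
 */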
1164 int
1165 tf_msg_get_tbl_entry(struct tf *tfp,
1166                      enum tf_dir dir,
1167                      enum tf_tbl_type type,
1168                      uint16_t size,
1169                      uint8_t *data,
1170                      uint32_t index)
1171 {
1172         int rc;
1173         struct tfp_send_msg_parms parms = { 0 };
1174         struct tf_tbl_type_get_input req = { 0 };
1175         struct tf_tbl_type_get_output resp = { 0 };
1176         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1177
1178         /* Populate the request */
1179         req.fw_session_id =
1180                 tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
1181         req.flags = tfp_cpu_to_le_16(dir);
1182         req.type = tfp_cpu_to_le_32(type);
1183         req.index = tfp_cpu_to_le_32(index);
1184
1185         MSG_PREP(parms,
1186                  TF_KONG_MB,
1187                  HWRM_TF,
1188                  HWRM_TFT_TBL_TYPE_GET,
1189                  req,
1190                  resp);
1191
1192         rc = tfp_send_msg_tunneled(tfp, &parms);
1193         if (rc)
1194                 return rc;
1195
1196         /* Verify the firmware returned enough data; copy only the requested size */
1197         if (resp.size < size)
1198                 return -EINVAL;
1199
1200         tfp_memcpy(data,
1201                    &resp.data,
1202                    size);
1203
1204         return tfp_le_to_cpu_32(parms.tf_resp_code);
1205 }
1206
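/**
 * Helpers for sizing data in slices; TF_BYTES_PER_SLICE is currently a
 * fixed 12 bytes and NUM_SLICES rounds a byte count up to the number of
 * slices it occupies.
 */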
1207 #define TF_BYTES_PER_SLICE(tfp) 12
1208 #define NUM_SLICES(tfp, bytes) \
1209         (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
1210
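/**
 * Allocates a DMA-able buffer of 'size' bytes and returns its virtual
 * and physical addresses in 'buf'.
 *
 * Returns 0 on success, -ENOMEM on allocation failure
 */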
1211 static int
1212 tf_msg_get_dma_buf(struct tf_msg_dma_buf *buf, int size)
1213 {
1214         struct tfp_calloc_parms alloc_parms;
1215         int rc;
1216
1217         /* Allocate a DMA-able buffer */
1218         alloc_parms.nitems = 1;
1219         alloc_parms.size = size;
1220         alloc_parms.alignment = 0;
1221         rc = tfp_calloc(&alloc_parms);
1222         if (rc) {
1223                 /* Log error */
1224                 PMD_DRV_LOG(ERR,
1225                             "Failed to allocate tcam dma entry, rc:%d\n",
1226                             rc);
1227                 return -ENOMEM;
1228         }
1229
1230         buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
1231         buf->va_addr = alloc_parms.mem_va;
1232
1233         return 0;
1234 }
1235
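/**
 * Sends a TCAM entry set request to TF Firmware. Key, mask and result
 * are packed back to back (key at offset 0, mask at key_size, result at
 * 2 * key_size). Payloads that fit within TF_PCI_BUF_SIZE_MAX are sent
 * inline; larger payloads are passed to the firmware by DMA address.
 *
 * Returns 0 on success, negative errno on failure
 */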
1236 int
1237 tf_msg_tcam_entry_set(struct tf *tfp,
1238                       struct tf_set_tcam_entry_parms *parms)
1239 {
1240         int rc;
1241         struct tfp_send_msg_parms mparms = { 0 };
1242         struct hwrm_tf_tcam_set_input req = { 0 };
1243         struct hwrm_tf_tcam_set_output resp = { 0 };
1244         uint16_t key_bytes =
1245                 TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
1246         uint16_t result_bytes =
1247                 TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
1248         struct tf_msg_dma_buf buf = { 0 };
1249         uint8_t *data = NULL;
1250         int data_size = 0;
1251
1252         rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type);
1253         if (rc != 0)
1254                 return rc;
1255
1256         req.idx = tfp_cpu_to_le_16(parms->idx);
1257         if (parms->dir == TF_DIR_TX)
1258                 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX;
1259
1260         req.key_size = key_bytes;
1261         req.mask_offset = key_bytes;
1262         /* Result follows after key and mask, thus multiply by 2 */
1263         req.result_offset = 2 * key_bytes;
1264         req.result_size = result_bytes;
1265         data_size = 2 * req.key_size + req.result_size;
1266
1267         if (data_size <= TF_PCI_BUF_SIZE_MAX) {
1268                 /* use pci buffer */
1269                 data = &req.dev_data[0];
1270         } else {
1271                 /* use dma buffer */
1272                 req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA;
1273                 rc = tf_msg_get_dma_buf(&buf, data_size);
1274                 if (rc != 0)
1275                         return rc;
1276                 data = buf.va_addr;
1277                 memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr));
1278         }
1279
1280         memcpy(&data[0], parms->key, key_bytes);
1281         memcpy(&data[key_bytes], parms->mask, key_bytes);
1282         memcpy(&data[req.result_offset], parms->result, result_bytes);
1283
1284         mparms.tf_type = HWRM_TF_TCAM_SET;
1285         mparms.req_data = (uint32_t *)&req;
1286         mparms.req_size = sizeof(req);
1287         mparms.resp_data = (uint32_t *)&resp;
1288         mparms.resp_size = sizeof(resp);
1289         mparms.mailbox = TF_KONG_MB;
1290
1291         rc = tfp_send_msg_direct(tfp,
1292                                  &mparms);
1293         /* Free the DMA buffer (if one was used) even when the HWRM
1294          * request fails so the error path does not leak it.
1295          */
1296         if (buf.va_addr != NULL)
1297                 tfp_free(buf.va_addr);
1298
1299         return rc;
1300 }
1301
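/**
 * Sends a TCAM entry free request to TF Firmware for the index given in
 * 'in_parms'.
 *
 * Returns 0 on success, negative errno on failure
 */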
1302 int
1303 tf_msg_tcam_entry_free(struct tf *tfp,
1304                        struct tf_free_tcam_entry_parms *in_parms)
1305 {
1306         int rc;
1307         struct hwrm_tf_tcam_free_input req =  { 0 };
1308         struct hwrm_tf_tcam_free_output resp = { 0 };
1309         struct tfp_send_msg_parms parms = { 0 };
1310
1311         /* Populate the request */
1312         rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type);
1313         if (rc != 0)
1314                 return rc;
1315
1316         req.count = 1;
1317         req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx);
1318         if (in_parms->dir == TF_DIR_TX)
1319                 req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX;
1320
1321         parms.tf_type = HWRM_TF_TCAM_FREE;
1322         parms.req_data = (uint32_t *)&req;
1323         parms.req_size = sizeof(req);
1324         parms.resp_data = (uint32_t *)&resp;
1325         parms.resp_size = sizeof(resp);
1326         parms.mailbox = TF_KONG_MB;
1327
1328         rc = tfp_send_msg_direct(tfp,
1329                                  &parms);
1330         return rc;
1331 }