/*-
 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bnx2x.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @o:          pointer to the object
 * @exe_len:    maximum length of an execution chunk
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static void
ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
                     struct ecore_exe_queue_obj *o,
                     int exe_len,
                     union ecore_qable_obj *owner,
                     exe_q_validate validate,
                     exe_q_remove remove,
                     exe_q_optimize optimize,
                     exe_q_execute exec,
                     exe_q_get get)
{
        ECORE_MEMSET(o, 0, sizeof(*o));

        ECORE_LIST_INIT(&o->exe_queue);
        ECORE_LIST_INIT(&o->pending_comp);

        ECORE_SPIN_LOCK_INIT(&o->lock, sc);

        o->exe_chunk_len = exe_len;
        o->owner = owner;

        /* Owner specific callbacks */
        o->validate = validate;
        o->remove = remove;
        o->optimize = optimize;
        o->execute = exec;
        o->get = get;

        ECORE_MSG("Setup the execution queue with the chunk length of %d",
                  exe_len);
}
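
/*
 * Usage sketch (illustrative only, not code from this driver): a queueable
 * owner object would wire up its execution queue with its own callbacks.
 * The callback names and the chunk length of 1 below are hypothetical
 * placeholders, not the driver's actual choices.
 *
 *      ecore_exe_queue_init(sc, &o->exe_queue, 1,
 *                           (union ecore_qable_obj *)o,
 *                           my_validate, my_remove, my_optimize,
 *                           my_execute, my_get);
 */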

static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
                                      struct ecore_exeq_elem *elem)
{
        ECORE_MSG("Deleting an exe_queue element");
        ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;
        int cnt = 0;

        ECORE_SPIN_LOCK_BH(&o->lock);

        ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
                                  struct ecore_exeq_elem)
                cnt++;

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:         driver handle
 * @o:          queue
 * @elem:       new command to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static int ecore_exe_queue_add(struct bnx2x_softc *sc,
                               struct ecore_exe_queue_obj *o,
                               struct ecore_exeq_elem *elem, int restore)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->lock);

        if (!restore) {
                /* Try to cancel this element against an opposite command
                 * already pending in the queue.
                 */
                rc = o->optimize(sc, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is valid */
                rc = o->validate(sc, o->owner, elem);
                if (rc) {
                        ECORE_MSG("Preamble failed: %d", rc);
                        goto free_and_exit;
                }
        }

        /* If so, add it to the execution queue */
        ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return ECORE_SUCCESS;

free_and_exit:
        ecore_exe_queue_free_elem(sc, elem);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return rc;
}
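
/*
 * Illustrative call sequence (a sketch; error handling abbreviated): callers
 * allocate an element, fill in its command data, and queue it. Elements that
 * get optimized away or fail validation are freed by ecore_exe_queue_add()
 * itself.
 *
 *      elem = ecore_exe_queue_alloc_elem(sc);
 *      if (!elem)
 *              return ECORE_NOMEM;
 *      ... fill elem->cmd_data and elem->cmd_len ...
 *      rc = ecore_exe_queue_add(sc, &o->exe_queue, elem, FALSE);
 */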

static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
                                            struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;

        while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
                                              struct ecore_exeq_elem, link);

                ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
                ecore_exe_queue_free_elem(sc, elem);
        }
}

static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
                                                 struct ecore_exe_queue_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->lock);

        __ecore_exe_queue_reset_pending(sc, o);

        ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static int ecore_exe_queue_step(struct bnx2x_softc *sc,
                                struct ecore_exe_queue_obj *o,
                                unsigned long *ramrod_flags)
{
        struct ecore_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        ECORE_MEMSET(&spacer, 0, sizeof(spacer));

        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the FW
         * which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        ECORE_MSG
                            ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
                        __ecore_exe_queue_reset_pending(sc, o);
                } else {
                        return ECORE_PENDING;
                }
        }

        /* Run through the pending commands list and create the next
         * execution chunk.
         */
        while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
                                              struct ecore_exeq_elem, link);
                ECORE_DBG_BREAK_IF(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /* Prevent both lists from being empty while moving an
                         * element. This will allow the call of
                         * ecore_exe_queue_empty() without locking.
                         */
                        ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
                        mb();
                        ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
                        ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
                        ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
                } else
                        break;
        }

        /* Sanity check */
        if (!cur_len)
                return ECORE_SUCCESS;

        rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /* In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /* If zero is returned, it means there are no outstanding
                 * pending completions and we may dismiss the pending list.
                 */
                __ecore_exe_queue_reset_pending(sc, o);

        return rc;
}
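
/*
 * Return-value summary for ecore_exe_queue_step(), derived from the code
 * above: a negative value means the execute callback failed and the chunk
 * was spliced back onto exe_queue; ECORE_SUCCESS (0) means either nothing
 * was queued or the chunk completed with no outstanding completions;
 * ECORE_PENDING means a completion is still outstanding.
 */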

static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
        int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

        /* Don't reorder!!! */
        mb();

        return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}
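
/*
 * Note: the mb() above pairs with the mb() and the spacer-element dance in
 * ecore_exe_queue_step(). Together they guarantee the two lists are never
 * both observed empty while an element is in flight between them, which is
 * what makes this lock-free emptiness check safe.
 */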

static struct ecore_exeq_elem *
ecore_exe_queue_alloc_elem(struct bnx2x_softc *sc __rte_unused)
{
        ECORE_MSG("Allocating a new exe_queue element");
        return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}

/************************ raw_obj functions ***********************************/
static int ecore_raw_check_pending(struct ecore_raw_obj *o)
{
        /*
         * !! converts the value returned by ECORE_TEST_BIT such that it
         * is guaranteed not to be truncated regardless of int definition.
         *
         * Note we cannot simply define the function's return value type
         * to match the type returned by ECORE_TEST_BIT, as it varies by
         * platform/implementation.
         */

        return !!ECORE_TEST_BIT(o->state, o->pstate);
}

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_CLEAR_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_SET_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
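
/*
 * Pending-bit lifecycle (descriptive note): set_pending() is typically
 * called before a ramrod is posted to the FW, the completion path calls
 * clear_pending() (see ecore_complete_vlan_mac() below), and
 * ecore_state_wait() below polls the same bit until it is cleared. The
 * memory barriers around the bit operations keep that ordering visible
 * across CPUs.
 */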

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc:         device handle
 * @state:      state which is to be cleared
 * @pstate:     pointer to the state buffer
 *
 */
static int ecore_state_wait(struct bnx2x_softc *sc, int state,
                            unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;

        if (CHIP_REV_IS_EMUL(sc))
                cnt *= 20;

        ECORE_MSG("waiting for state to become %d", state);

        ECORE_MIGHT_SLEEP();
        while (cnt--) {
                bnx2x_intr_legacy(sc, 1);
                if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
                        ECORE_MSG("exit  (cnt %d)", 5000 - cnt);
#endif
                        return ECORE_SUCCESS;
                }

                ECORE_WAIT(sc, delay_us);

                if (sc->panic)
                        return ECORE_IO;
        }

        /* timeout! */
        PMD_DRV_LOG(ERR, "timeout waiting for state %d", state);
#ifdef ECORE_STOP_ON_ERROR
        ecore_panic();
#endif

        return ECORE_TIMEOUT;
}

static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
{
        return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get_entry(mp, offset);
}

static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get(mp, 1);
}

static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}
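
/*
 * Credit-pool pairing (descriptive note): every successful get()/get_entry()
 * call must eventually be balanced by the matching put()/put_entry(); e.g.
 * an ADD that consumed a credit returns it when the entry is later deleted.
 * The validate/remove/optimize callbacks below enforce this pairing.
 */
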

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on the vlan
 * mac head list.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
                                            struct ecore_vlan_mac_obj *o)
{
        if (o->head_reader) {
                ECORE_MSG("vlan_mac_lock writer - There are readers; Busy");
                return ECORE_BUSY;
        }

        ECORE_MSG("vlan_mac_lock writer - Taken");
        return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute a step that was previously pended
 * because the vlan mac head list lock was taken.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        int rc;
        unsigned long ramrod_flags = o->saved_ramrod_flags;

        ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu",
                  ramrod_flags);
        o->head_exe_request = FALSE;
        o->saved_ramrod_flags = 0;
        rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
        if (rc != ECORE_SUCCESS) {
                PMD_DRV_LOG(ERR,
                            "execution of pending commands failed with rc %d",
                            rc);
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        }
}

/**
 * __ecore_vlan_mac_h_pend - pend an execution step which couldn't run because
 * the vlan mac head list lock was taken.
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @ramrod_flags:       ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
                                    struct ecore_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
{
        o->head_exe_request = TRUE;
        o->saved_ramrod_flags = ramrod_flags;
        ECORE_MSG("Placing pending execution with ramrod flags %lu",
                  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        /* It's possible a new pending execution was added since this writer
         * executed. If so, execute again. [Ad infinitum]
         */
        while (o->head_exe_request) {
                ECORE_MSG
                    ("vlan_mac_lock - writer release encountered a pending request");
                __ecore_vlan_mac_h_exec_pending(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
                                   struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_write_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
                                        struct ecore_vlan_mac_obj *o)
{
        /* If we got here, we're holding lock --> no WRITER exists */
        o->head_reader++;
        ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader);

        return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
                                      struct ecore_vlan_mac_obj *o)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        rc = __ecore_vlan_mac_h_read_lock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

        return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
                                           struct ecore_vlan_mac_obj *o)
{
        if (!o->head_reader) {
                PMD_DRV_LOG(ERR,
                            "Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        } else {
                o->head_reader--;
                PMD_DRV_LOG(INFO,
                            "vlan_mac_lock - decreased readers to %d",
                            o->head_reader);
        }

        /* It's possible a new pending execution was added, and that this reader
         * was last - if so we need to execute the command.
         */
        if (!o->head_reader && o->head_exe_request) {
                PMD_DRV_LOG(INFO,
                            "vlan_mac_lock - reader release encountered a pending request");

                /* Writer release will do the trick */
                __ecore_vlan_mac_h_write_unlock(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_read_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
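
/*
 * Reader-lock usage sketch (illustrative; it mirrors ecore_get_n_elements()
 * below): take the reader lock, walk the registry head list, then release.
 *
 *      if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *              ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
 *                                        struct ecore_vlan_mac_registry_elem)
 *                      ... inspect pos->u ...
 *              ecore_vlan_mac_h_read_unlock(sc, o);
 *      }
 */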

/**
 * ecore_get_n_elements - get n elements from the vlan mac registry
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @n:                  number of elements to get
 * @base:               base address for element placement
 * @stride:             stride between elements (in bytes)
 * @size:               size of the data copied per element (in bytes)
 */
static int ecore_get_n_elements(struct bnx2x_softc *sc,
                                struct ecore_vlan_mac_obj *o, int n,
                                uint8_t *base, uint8_t stride, uint8_t size)
{
        struct ecore_vlan_mac_registry_elem *pos;
        uint8_t *next = base;
        int counter = 0, read_lock;

        ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)");
        read_lock = ecore_vlan_mac_h_read_lock(sc, o);
        if (read_lock != ECORE_SUCCESS)
                PMD_DRV_LOG(ERR,
                            "get_n_elements failed to get vlan mac reader lock; Access without lock");

        /* traverse list */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem) {
                if (counter < n) {
                        ECORE_MEMCPY(next, &pos->u, size);
                        counter++;
                        ECORE_MSG
                            ("copied element number %d to address %p element was:",
                             counter, next);
                        next += stride + size;
                }
        }

        if (read_lock == ECORE_SUCCESS) {
                ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)");
                ecore_vlan_mac_h_read_unlock(sc, o);
        }

        return counter * ETH_ALEN;
}
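
/*
 * Illustrative caller (a sketch; the buffer sizing is hypothetical): copy up
 * to 8 registered MACs into a flat, tightly packed buffer. A stride of 0
 * packs the entries back-to-back, since the walker advances by
 * stride + size after each copy.
 *
 *      uint8_t macs[8 * ETH_ALEN];
 *      int copied = ecore_get_n_elements(sc, mac_obj, 8, macs, 0, ETH_ALEN);
 */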

/* check_add() callbacks */
static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
                               struct ecore_vlan_mac_obj *o,
                               union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
                  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
                  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
                return ECORE_INVAL;

        /* Check if a requested MAC already exists */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return ECORE_EXISTS;

        return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bnx2x_softc *sc __rte_unused,
                    struct ecore_vlan_mac_obj *o,
                    union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
                  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
                  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return pos;

        return NULL;
}

/* check_move() callback */
static int ecore_check_move(struct bnx2x_softc *sc,
                            struct ecore_vlan_mac_obj *src_o,
                            struct ecore_vlan_mac_obj *dst_o,
                            union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(sc, src_o, data);

        /* Check if the configuration can be added */
        rc = dst_o->check_add(sc, dst_o, data);

        /* If this classification can not be added (is already set)
         * or can't be deleted - return an error.
         */
        if (rc || !pos)
                return FALSE;

        return TRUE;
}

static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
                                       __rte_unused struct ecore_vlan_mac_obj *src_o,
                                       __rte_unused struct ecore_vlan_mac_obj *dst_o,
                                       __rte_unused union ecore_classification_ramrod_data *data)
{
        return FALSE;
}

static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
        struct ecore_raw_obj *raw = &o->raw;
        uint8_t rx_tx_flag = 0;

        if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}

static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
                                 int add, unsigned char *dev_addr, int index)
{
        uint32_t wb_data[2];
        uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
            NIG_REG_LLH0_FUNC_MEM;

        if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
                return;

        if (index > ECORE_LLH_CAM_MAX_PF_LINE)
                return;

        ECORE_MSG("Going to %s LLH configuration at entry %d",
                  (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a uint64_t WB register */
                reg_offset += 8 * index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] << 8) | dev_addr[5]);
                wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

                ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
        }

        REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
}
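
/*
 * WB register layout, as derived from the packing above: for a MAC
 * aa:bb:cc:dd:ee:ff (dev_addr[] = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff})
 * the 64-bit LLH_FUNC_MEM entry is written as
 *
 *      wb_data[0] = 0xccddeeff;        bytes 2..5
 *      wb_data[1] = 0x0000aabb;        bytes 0..1
 */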

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @sc:         device handle
 * @o:          queue for which we want to configure this rule
 * @add:        if TRUE the command is an ADD command, DEL otherwise
 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 * @hdr:        pointer to a header to setup
 *
 */
static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
                                          int add, int opcode,
                                          struct eth_classify_cmd_header *hdr)
{
        struct ecore_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* Rx and/or Tx (internal switching) configuration? */
        hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
            (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       ECORE_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules
 *
 * Currently we always configure one rule; the echo field is set to contain
 * a CID and an opcode type.
 */
static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
                                            struct eth_classify_header *hdr,
                                            int rule_cnt)
{
        hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
                                      (type << ECORE_SWCID_SHIFT));
        hdr->rule_cnt = (uint8_t)rule_cnt;
}
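
/*
 * Echo decoding sketch (illustrative): a completion handler can recover the
 * CID and the pending-filter type from the echo value built above:
 *
 *      cid  = echo & ECORE_SWCID_MASK;
 *      type = echo >> ECORE_SWCID_SHIFT;
 */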

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
                                 struct ecore_vlan_mac_obj *o,
                                 struct ecore_exeq_elem *elem, int rule_idx,
                                 __rte_unused int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
            (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /* Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
         * When the PF must configure multiple unicast ETH MACs in switch
         * independent mode (NetQ, multiple netdev MACs, etc.), consider
         * better utilisation of the 8 per-function MAC entries in the LLH
         * register. There are also NIG_REG_P[01]_LLH_FUNC_MEM2 registers
         * that bring the total number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != ECORE_VLAN_MAC_MOVE) {
                if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ISCSI_ETH_LINE);
                else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                ECORE_MEMSET(data, 0, sizeof(*data));

        /* Setup a command header */
        ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
                  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
                  mac[4], mac[5], raw->cl_id);

        /* Set the MAC itself */
        ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);
        rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == ECORE_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.vlan_mac.target_obj,
                                              TRUE, CLASSIFY_RULE_OPCODE_MAC,
                                              &rule_entry->mac.header);

                /* Set the MAC itself */
                ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
                rule_entry->mac.inner_mac =
                    elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
        }

        /* Set the ramrod data header */
        ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @o:          queue
 * @type:       ECORE_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1H
 */
static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj *o,
                                             int type, int cam_offset,
                                             struct mac_configuration_hdr *hdr)
{
        struct ecore_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (uint8_t)cam_offset;
        hdr->client_id = ECORE_CPU_TO_LE16(0xff);
        hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
                                      (type << ECORE_SWCID_SHIFT));
}

static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj *o,
                                             int add, int opcode, uint8_t *mac,
                                             uint16_t vlan_id,
                                             struct mac_configuration_entry *cfg_entry)
{
        struct ecore_raw_obj *r = &o->raw;
        uint32_t cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

        if (add) {
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_SET);
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
                               opcode);

                /* Set a MAC in a ramrod data */
                ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_INVALIDATE);
}

static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc __rte_unused,
                                         struct ecore_vlan_mac_obj *o,
                                         int type, int cam_offset,
                                         int add, uint8_t *mac,
                                         uint16_t vlan_id, int opcode,
                                         struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];

        ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
        ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
                  (add ? "setting" : "clearing"),
                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
                  o->raw.cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 * @elem:       ecore_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_obj *o,
                                  struct ecore_exeq_elem *elem,
                                  __rte_unused int rule_idx, int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
            (struct mac_configuration_cmd *)(raw->rdata);
        /* The 57711 does not support the MOVE command,
         * so it's either ADD or DEL
         */
        int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
            TRUE : FALSE;

        /* Reset the ramrod data buffer */
        ECORE_MEMSET(config, 0, sizeof(*config));

        ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}

/**
 * ecore_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @sc:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has
 * been handled.
 *
 */
static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_ramrod_params *p,
                                  struct ecore_vlan_mac_registry_elem **ppos)
{
        struct ecore_vlan_mac_registry_elem *pos;
        struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If list is empty - there is nothing to do here */
        if (ECORE_LIST_IS_EMPTY(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* make a step... */
        if (*ppos == NULL)
                *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
                                               struct ecore_vlan_mac_registry_elem,
                                               link);
        else
                *ppos = ECORE_LIST_NEXT(*ppos, link,
                                        struct ecore_vlan_mac_registry_elem);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' */
        ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = ECORE_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

        return ecore_config_vlan_mac(sc, p);
}
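
/*
 * Iterator usage sketch (illustrative; error handling elided): restore every
 * previously configured element by driving the cookie until it comes back
 * NULL.
 *
 *      struct ecore_vlan_mac_registry_elem *pos = NULL;
 *
 *      do {
 *              rc = ecore_vlan_mac_restore(sc, p, &pos);
 *      } while (pos && rc >= 0);
 */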

/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
                                                  struct ecore_exeq_elem *elem)
{
        struct ecore_exeq_elem *pos;
        struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

        /* Check pending for execution commands */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
                                  struct ecore_exeq_elem)
                if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
                                  sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc:         device handle
 * @qo:         ecore_qable_obj
 * @elem:       ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
                                       union ecore_qable_obj *qo,
                                       struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                ECORE_MSG
                    ("ADD command is not allowed considering current registry state.");
                return rc;
        }

        /* Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                ECORE_MSG("There is a pending ADD command already");
                return ECORE_EXISTS;
        }

        /* Consume the credit unless explicitly asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->get_credit(o)))
                return ECORE_INVAL;

        return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc:         device handle
 * @qo:         quable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
                                       union ecore_qable_obj *qo,
                                       struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
        struct ecore_vlan_mac_registry_elem *pos;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        struct ecore_exeq_elem query_elem;

        /* If this classification can not be deleted (doesn't exist)
         * - return ECORE_EXISTS.
         */
        pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                ECORE_MSG
                    ("DEL command is not allowed considering current registry state");
                return ECORE_EXISTS;
        }

        /* Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                PMD_DRV_LOG(ERR, "There is a pending MOVE command already");
                return ECORE_INVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                ECORE_MSG("There is a pending DEL command already");
                return ECORE_EXISTS;
        }

        /* Return the credit to the credit pool unless asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->put_credit(o))) {
                PMD_DRV_LOG(ERR, "Failed to return a credit");
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:         device handle
 * @qo:         quable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
                                        union ecore_qable_obj *qo,
                                        struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct ecore_exeq_elem query_elem;
        struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /* Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
                ECORE_MSG
                    ("MOVE command is not allowed considering current registry state");
                return ECORE_INVAL;
        }

        /* Check if there is an already pending DEL or MOVE command for the
         * source object or an ADD command for the destination object. Return
         * an error if so.
         */
        ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                PMD_DRV_LOG(ERR,
                            "There is a pending DEL command on the source queue already");
                return ECORE_INVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                ECORE_MSG("There is a pending MOVE command already");
                return ECORE_EXISTS;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                PMD_DRV_LOG(ERR,
                            "There is a pending ADD command on the destination queue already");
                return ECORE_INVAL;
        }

        /* Consume the credit unless explicitly asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              dest_o->get_credit(dest_o)))
                return ECORE_INVAL;

        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              src_o->put_credit(src_o))) {
                /* return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
                                   union ecore_qable_obj *qo,
                                   struct ecore_exeq_elem *elem)
{
        switch (elem->cmd_data.vlan_mac.cmd) {
        case ECORE_VLAN_MAC_ADD:
                return ecore_validate_vlan_mac_add(sc, qo, elem);
        case ECORE_VLAN_MAC_DEL:
                return ecore_validate_vlan_mac_del(sc, qo, elem);
        case ECORE_VLAN_MAC_MOVE:
                return ecore_validate_vlan_mac_move(sc, qo, elem);
        default:
                return ECORE_INVAL;
        }
}

static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
                                 union ecore_qable_obj *qo,
                                 struct ecore_exeq_elem *elem)
{
        int rc = 0;

        /* If consumption wasn't required, nothing to do */
        if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                           &elem->cmd_data.vlan_mac.vlan_mac_flags))
                return ECORE_SUCCESS;

        switch (elem->cmd_data.vlan_mac.cmd) {
        case ECORE_VLAN_MAC_ADD:
        case ECORE_VLAN_MAC_MOVE:
                rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
                break;
        case ECORE_VLAN_MAC_DEL:
                rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
                break;
        default:
                return ECORE_INVAL;
        }

        if (rc != TRUE)
                return ECORE_INVAL;

        return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 *
 */
static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
                               struct ecore_vlan_mac_obj *o)
{
        int cnt = 5000, rc;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        struct ecore_raw_obj *raw = &o->raw;

        while (cnt--) {
                /* Wait for the current command to complete */
                rc = raw->wait_comp(sc, raw);
                if (rc)
                        return rc;

                /* Wait until there are no pending commands */
                if (!ecore_exe_queue_empty(exeq))
                        ECORE_WAIT(sc, 1000);
                else
                        return ECORE_SUCCESS;
        }

        return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
                                         struct ecore_vlan_mac_obj *o,
                                         unsigned long *ramrod_flags)
{
        int rc = ECORE_SUCCESS;

        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

        ECORE_MSG("vlan_mac_execute_step - trying to take writer lock");
        rc = __ecore_vlan_mac_h_write_trylock(sc, o);

        if (rc != ECORE_SUCCESS) {
                __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

                /* The calling function should not differentiate between this
                 * case and the case in which there is already a pending ramrod.
                 */
                rc = ECORE_PENDING;
        } else {
                rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
        }
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

        return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:                 device handle
 * @o:                  ecore_vlan_mac_obj
 * @cqe:                completion element
 * @ramrod_flags:       if RAMROD_CONT is set, schedules the next execution chunk
 *
 */
static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
                                   struct ecore_vlan_mac_obj *o,
                                   union event_ring_elem *cqe,
                                   unsigned long *ramrod_flags)
{
        struct ecore_raw_obj *r = &o->raw;
        int rc;

        /* Reset pending list */
        ecore_exe_queue_reset_pending(sc, &o->exe_queue);

        /* Clear pending */
        r->clear_pending(r);

        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return ECORE_INVAL;

        /* Run the next bulk of pending commands if requested */
        if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
                rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
                if (rc < 0)
                        return rc;
        }

        /* If there is more work to do return PENDING */
        if (!ecore_exe_queue_empty(&o->exe_queue))
                return ECORE_PENDING;

        return ECORE_SUCCESS;
}
1413
1414 /**
1415  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1416  *
1417  * @sc:         device handle
1418  * @o:          ecore_qable_obj
1419  * @elem:       ecore_exeq_elem
1420  */
1421 static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
1422                                    union ecore_qable_obj *qo,
1423                                    struct ecore_exeq_elem *elem)
1424 {
1425         struct ecore_exeq_elem query, *pos;
1426         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1427         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1428
1429         ECORE_MEMCPY(&query, elem, sizeof(query));
1430
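        /* Invert the command for the lookup: a queued DEL can cancel this ADD
         * and vice versa.
         */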
1431         switch (elem->cmd_data.vlan_mac.cmd) {
1432         case ECORE_VLAN_MAC_ADD:
1433                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1434                 break;
1435         case ECORE_VLAN_MAC_DEL:
1436                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1437                 break;
1438         default:
1439                 /* Don't handle anything other than ADD or DEL */
1440                 return 0;
1441         }
1442
1443         /* If we found the appropriate element - delete it */
1444         pos = exeq->get(exeq, &query);
1445         if (pos) {
1446
1447                 /* Return the credit of the optimized command */
1448                 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1449                                     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
                        if (query.cmd_data.vlan_mac.cmd ==
                            ECORE_VLAN_MAC_ADD) {
                                /* The optimized-out pending command was an ADD
                                 * that had already consumed a credit in
                                 * 'validate' - return that credit.
                                 */
                                if (!o->put_credit(o)) {
                                        PMD_DRV_LOG(ERR,
                                                    "Failed to return the credit for the optimized ADD command");
                                        return ECORE_INVAL;
                                }
                        } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
                                PMD_DRV_LOG(ERR,
                                            "Failed to recover the credit from the optimized DEL command");
                                return ECORE_INVAL;
                        }
1460                 }
1461
1462                 ECORE_MSG("Optimizing %s command",
1463                           (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1464                           "ADD" : "DEL");
1465
1466                 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1467                 ecore_exe_queue_free_elem(sc, pos);
1468                 return 1;
1469         }
1470
1471         return 0;
1472 }
1473
1474 /**
1475  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1476  *
1477  * @sc:   device handle
 * @o:       vlan_mac object the registry element belongs to
 * @elem:    execution queue element holding the command data
 * @restore: if TRUE this is a RESTORE flow and no new element is allocated
 * @re:      output - the prepared registry element
1482  *
1483  * prepare a registry element according to the current command request.
1484  */
1485 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
1486                                             struct ecore_vlan_mac_obj *o,
1487                                             struct ecore_exeq_elem *elem,
                                            int restore,
                                            struct ecore_vlan_mac_registry_elem **re)
1491 {
1492         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1493         struct ecore_vlan_mac_registry_elem *reg_elem;
1494
1495         /* Allocate a new registry element if needed. */
1496         if (!restore &&
1497             ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1498                 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1499                 if (!reg_elem)
1500                         return ECORE_NOMEM;
1501
1502                 /* Get a new CAM offset */
1503                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1504                         /* This shall never happen, because we have checked the
1505                          * CAM availability in the 'validate'.
1506                          */
1507                         ECORE_DBG_BREAK_IF(1);
1508                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1509                         return ECORE_INVAL;
1510                 }
1511
1512                 ECORE_MSG("Got cam offset %d", reg_elem->cam_offset);
1513
                /* Set the VLAN-MAC data */
1515                 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1516                              sizeof(reg_elem->u));
1517
1518                 /* Copy the flags (needed for DEL and RESTORE flows) */
1519                 reg_elem->vlan_mac_flags =
1520                     elem->cmd_data.vlan_mac.vlan_mac_flags;
1521         } else                  /* DEL, RESTORE */
1522                 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1523
1524         *re = reg_elem;
1525         return ECORE_SUCCESS;
1526 }
1527
1528 /**
1529  * ecore_execute_vlan_mac - execute vlan mac command
1530  *
1531  * @sc:                 device handle
 * @qo:                 ecore_qable_obj holding the vlan_mac object
 * @exe_chunk:          list of execution queue elements to configure
 * @ramrod_flags:       execution flags
1535  *
1536  * go and send a ramrod!
1537  */
1538 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
1539                                   union ecore_qable_obj *qo,
                                  ecore_list_t *exe_chunk,
1541                                   unsigned long *ramrod_flags)
1542 {
1543         struct ecore_exeq_elem *elem;
1544         struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1545         struct ecore_raw_obj *r = &o->raw;
1546         int rc, idx = 0;
1547         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1548         int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1549         struct ecore_vlan_mac_registry_elem *reg_elem;
1550         enum ecore_vlan_mac_cmd cmd;
1551
1552         /* If DRIVER_ONLY execution is requested, cleanup a registry
1553          * and exit. Otherwise send a ramrod to FW.
1554          */
1555         if (!drv_only) {
1556
1557                 /* Set pending */
1558                 r->set_pending(r);
1559
1560                 /* Fill the ramrod data */
1561                 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1562                                           struct ecore_exeq_elem) {
1563                         cmd = elem->cmd_data.vlan_mac.cmd;
1564                         /* We will add to the target object in MOVE command, so
1565                          * change the object for a CAM search.
1566                          */
1567                         if (cmd == ECORE_VLAN_MAC_MOVE)
1568                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1569                         else
1570                                 cam_obj = o;
1571
1572                         rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1573                                                               elem, restore,
1574                                                               &reg_elem);
1575                         if (rc)
1576                                 goto error_exit;
1577
1578                         ECORE_DBG_BREAK_IF(!reg_elem);
1579
1580                         /* Push a new entry into the registry */
1581                         if (!restore &&
1582                             ((cmd == ECORE_VLAN_MAC_ADD) ||
1583                              (cmd == ECORE_VLAN_MAC_MOVE)))
1584                                 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1585                                                      &cam_obj->head);
1586
1587                         /* Configure a single command in a ramrod data buffer */
1588                         o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
1589
1590                         /* MOVE command consumes 2 entries in the ramrod data */
1591                         if (cmd == ECORE_VLAN_MAC_MOVE)
1592                                 idx += 2;
1593                         else
1594                                 idx++;
1595                 }
1596
                /* No need for an explicit memory barrier here: we would in
                 * any case have to order the write to the SPQ element against
                 * the update of the SPQ producer, which involves a memory
                 * read, and the full memory barrier needed for that is
                 * already placed inside ecore_sp_post().
                 */
1604
1605                 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1606                                    r->rdata_mapping, ETH_CONNECTION_TYPE);
1607                 if (rc)
1608                         goto error_exit;
1609         }
1610
1611         /* Now, when we are done with the ramrod - clean up the registry */
1612         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1613                 cmd = elem->cmd_data.vlan_mac.cmd;
1614                 if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
1615                         reg_elem = o->check_del(sc, o,
1616                                                 &elem->cmd_data.vlan_mac.u);
1617
1618                         ECORE_DBG_BREAK_IF(!reg_elem);
1619
1620                         o->put_cam_offset(o, reg_elem->cam_offset);
1621                         ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1622                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1623                 }
1624         }
1625
1626         if (!drv_only)
1627                 return ECORE_PENDING;
1628         else
1629                 return ECORE_SUCCESS;
1630
1631 error_exit:
1632         r->clear_pending(r);
1633
1634         /* Cleanup a registry in case of a failure */
1635         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1636                 cmd = elem->cmd_data.vlan_mac.cmd;
1637
1638                 if (cmd == ECORE_VLAN_MAC_MOVE)
1639                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1640                 else
1641                         cam_obj = o;
1642
1643                 /* Delete all newly added above entries */
1644                 if (!restore &&
1645                     ((cmd == ECORE_VLAN_MAC_ADD) ||
1646                      (cmd == ECORE_VLAN_MAC_MOVE))) {
1647                         reg_elem = o->check_del(sc, cam_obj,
1648                                                 &elem->cmd_data.vlan_mac.u);
1649                         if (reg_elem) {
1650                                 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1651                                                         &cam_obj->head);
1652                                 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1653                         }
1654                 }
1655         }
1656
1657         return rc;
1658 }
1659
static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc,
                                       struct ecore_vlan_mac_ramrod_params *p)
1662 {
1663         struct ecore_exeq_elem *elem;
1664         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1665         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1666
1667         /* Allocate the execution queue element */
1668         elem = ecore_exe_queue_alloc_elem(sc);
1669         if (!elem)
1670                 return ECORE_NOMEM;
1671
1672         /* Set the command 'length' */
1673         switch (p->user_req.cmd) {
1674         case ECORE_VLAN_MAC_MOVE:
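                /* MOVE needs two classification rules: a DEL on the source
                 * object and an ADD on the target object.
                 */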
1675                 elem->cmd_len = 2;
1676                 break;
1677         default:
1678                 elem->cmd_len = 1;
1679         }
1680
1681         /* Fill the object specific info */
1682         ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
1683                      sizeof(p->user_req));
1684
1685         /* Try to add a new command to the pending list */
1686         return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1687 }
1688
1689 /**
1690  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1691  *
1692  * @sc:   device handle
 * @p:    ramrod parameters describing the requested command
1694  *
1695  */
1696 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
1697                           struct ecore_vlan_mac_ramrod_params *p)
1698 {
1699         int rc = ECORE_SUCCESS;
1700         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1701         unsigned long *ramrod_flags = &p->ramrod_flags;
1702         int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
1703         struct ecore_raw_obj *raw = &o->raw;
1704
1705         /*
1706          * Add new elements to the execution list for commands that require it.
1707          */
1708         if (!cont) {
1709                 rc = ecore_vlan_mac_push_new_cmd(sc, p);
1710                 if (rc)
1711                         return rc;
1712         }
1713
        /* Even if nothing more is executed in this iteration, return PENDING
         * if there are still commands queued.
         */
1717         if (!ecore_exe_queue_empty(&o->exe_queue))
1718                 rc = ECORE_PENDING;
1719
1720         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1721                 ECORE_MSG
1722                     ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1723                 raw->clear_pending(raw);
1724         }
1725
1726         /* Execute commands if required */
1727         if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
1728             ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
1729                 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
1730                                                    &p->ramrod_flags);
1731                 if (rc < 0)
1732                         return rc;
1733         }
1734
1735         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
         * then the user wants to wait until the last command is done.
1737          */
1738         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1739                 /* Wait maximum for the current exe_queue length iterations plus
1740                  * one (for the current pending command).
1741                  */
1742                 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
1743
1744                 while (!ecore_exe_queue_empty(&o->exe_queue) &&
1745                        max_iterations--) {
1746
1747                         /* Wait for the current command to complete */
1748                         rc = raw->wait_comp(sc, raw);
1749                         if (rc)
1750                                 return rc;
1751
1752                         /* Make a next step */
1753                         rc = __ecore_vlan_mac_execute_step(sc,
1754                                                            p->vlan_mac_obj,
1755                                                            &p->ramrod_flags);
1756                         if (rc < 0)
1757                                 return rc;
1758                 }
1759
1760                 return ECORE_SUCCESS;
1761         }
1762
1763         return rc;
1764 }
1765
1766 /**
1767  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1768  *
1769  * @sc:                 device handle
 * @o:                  vlan_mac object to delete the elements from
 * @vlan_mac_flags:     flags an element must match in order to be deleted
 * @ramrod_flags:       execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there
 * are no more elements left, a positive value if the last operation has
 * completed successfully and there are more previously configured elements,
 * or a negative value if the current operation has failed.
1778  */
1779 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
1780                                   struct ecore_vlan_mac_obj *o,
1781                                   unsigned long *vlan_mac_flags,
1782                                   unsigned long *ramrod_flags)
1783 {
1784         struct ecore_vlan_mac_registry_elem *pos = NULL;
1785         int rc = 0, read_lock;
1786         struct ecore_vlan_mac_ramrod_params p;
1787         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1788         struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
1789
1790         /* Clear pending commands first */
1791
1792         ECORE_SPIN_LOCK_BH(&exeq->lock);
1793
1794         ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
1795                                        &exeq->exe_queue, link,
1796                                        struct ecore_exeq_elem) {
1797                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1798                     *vlan_mac_flags) {
1799                         rc = exeq->remove(sc, exeq->owner, exeq_pos);
1800                         if (rc) {
1801                                 PMD_DRV_LOG(ERR, "Failed to remove command");
1802                                 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1803                                 return rc;
1804                         }
1805                         ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
1806                                                 &exeq->exe_queue);
1807                         ecore_exe_queue_free_elem(sc, exeq_pos);
1808                 }
1809         }
1810
1811         ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1812
1813         /* Prepare a command request */
1814         ECORE_MEMSET(&p, 0, sizeof(p));
1815         p.vlan_mac_obj = o;
1816         p.ramrod_flags = *ramrod_flags;
1817         p.user_req.cmd = ECORE_VLAN_MAC_DEL;
1818
1819         /* Add all but the last VLAN-MAC to the execution queue without actually
         * executing anything.
1821          */
1822         ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
1823         ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
1824         ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1825
1826         ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1827         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
1828         if (read_lock != ECORE_SUCCESS)
1829                 return read_lock;
1830
1831         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1832                                   struct ecore_vlan_mac_registry_elem) {
1833                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1834                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1835                         ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
1836                         rc = ecore_config_vlan_mac(sc, &p);
1837                         if (rc < 0) {
1838                                 PMD_DRV_LOG(ERR,
1839                                             "Failed to add a new DEL command");
1840                                 ecore_vlan_mac_h_read_unlock(sc, o);
1841                                 return rc;
1842                         }
1843                 }
1844         }
1845
1846         ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1847         ecore_vlan_mac_h_read_unlock(sc, o);
1848
1849         p.ramrod_flags = *ramrod_flags;
1850         ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1851
1852         return ecore_config_vlan_mac(sc, &p);
1853 }
1854
1855 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
1856                                uint32_t cid, uint8_t func_id,
1857                                void *rdata,
1858                                ecore_dma_addr_t rdata_mapping, int state,
1859                                unsigned long *pstate, ecore_obj_type type)
1860 {
1861         raw->func_id = func_id;
1862         raw->cid = cid;
1863         raw->cl_id = cl_id;
1864         raw->rdata = rdata;
1865         raw->rdata_mapping = rdata_mapping;
1866         raw->state = state;
1867         raw->pstate = pstate;
1868         raw->obj_type = type;
1869         raw->check_pending = ecore_raw_check_pending;
1870         raw->clear_pending = ecore_raw_clear_pending;
1871         raw->set_pending = ecore_raw_set_pending;
1872         raw->wait_comp = ecore_raw_wait;
1873 }
1874
1875 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
1876                                        uint8_t cl_id, uint32_t cid,
1877                                        uint8_t func_id, void *rdata,
1878                                        ecore_dma_addr_t rdata_mapping,
1879                                        int state, unsigned long *pstate,
1880                                        ecore_obj_type type,
                                       struct ecore_credit_pool_obj *macs_pool,
                                       struct ecore_credit_pool_obj *vlans_pool)
1884 {
1885         ECORE_LIST_INIT(&o->head);
1886         o->head_reader = 0;
1887         o->head_exe_request = FALSE;
1888         o->saved_ramrod_flags = 0;
1889
1890         o->macs_pool = macs_pool;
1891         o->vlans_pool = vlans_pool;
1892
1893         o->delete_all = ecore_vlan_mac_del_all;
1894         o->restore = ecore_vlan_mac_restore;
1895         o->complete = ecore_complete_vlan_mac;
1896         o->wait = ecore_wait_vlan_mac;
1897
1898         ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1899                            state, pstate, type);
1900 }
1901
1902 void ecore_init_mac_obj(struct bnx2x_softc *sc,
1903                         struct ecore_vlan_mac_obj *mac_obj,
1904                         uint8_t cl_id, uint32_t cid, uint8_t func_id,
1905                         void *rdata, ecore_dma_addr_t rdata_mapping, int state,
1906                         unsigned long *pstate, ecore_obj_type type,
1907                         struct ecore_credit_pool_obj *macs_pool)
1908 {
1909         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
1910
1911         ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1912                                    rdata_mapping, state, pstate, type,
1913                                    macs_pool, NULL);
1914
1915         /* CAM credit pool handling */
1916         mac_obj->get_credit = ecore_get_credit_mac;
1917         mac_obj->put_credit = ecore_put_credit_mac;
1918         mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
1919         mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
1920
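        /* E1x uses the legacy set-MAC ramrod, which carries a single rule,
         * while E2 and newer use classification rules with up to
         * CLASSIFY_RULES_COUNT rules per ramrod.
         */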
1921         if (CHIP_IS_E1x(sc)) {
1922                 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
1923                 mac_obj->check_del = ecore_check_mac_del;
1924                 mac_obj->check_add = ecore_check_mac_add;
1925                 mac_obj->check_move = ecore_check_move_always_err;
1926                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1927
1928                 /* Exe Queue */
1929                 ecore_exe_queue_init(sc,
1930                                      &mac_obj->exe_queue, 1, qable_obj,
1931                                      ecore_validate_vlan_mac,
1932                                      ecore_remove_vlan_mac,
1933                                      ecore_optimize_vlan_mac,
1934                                      ecore_execute_vlan_mac,
1935                                      ecore_exeq_get_mac);
1936         } else {
1937                 mac_obj->set_one_rule = ecore_set_one_mac_e2;
1938                 mac_obj->check_del = ecore_check_mac_del;
1939                 mac_obj->check_add = ecore_check_mac_add;
1940                 mac_obj->check_move = ecore_check_move;
1941                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1942                 mac_obj->get_n_elements = ecore_get_n_elements;
1943
1944                 /* Exe Queue */
1945                 ecore_exe_queue_init(sc,
1946                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1947                                      qable_obj, ecore_validate_vlan_mac,
1948                                      ecore_remove_vlan_mac,
1949                                      ecore_optimize_vlan_mac,
1950                                      ecore_execute_vlan_mac,
1951                                      ecore_exeq_get_mac);
1952         }
1953 }
1954
1955 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static void __storm_memset_mac_filters(struct bnx2x_softc *sc,
                                       struct tstorm_eth_mac_filter_config *mac_filters,
                                       uint16_t pf_id)
1959 {
1960         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1961
1962         uint32_t addr = BAR_TSTRORM_INTMEM +
1963             TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1964
1965         ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
1966 }
1967
1968 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
1969                                  struct ecore_rx_mode_ramrod_params *p)
1970 {
1971         /* update the sc MAC filter structure */
1972         uint32_t mask = (1 << p->cl_id);
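        /* Each client id owns a single bit in each of the filter bitmasks
         * updated below.
         */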
1973
1974         struct tstorm_eth_mac_filter_config *mac_filters =
1975             (struct tstorm_eth_mac_filter_config *)p->rdata;
1976
1977         /* initial setting is drop-all */
1978         uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
1979         uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
1980         uint8_t unmatched_unicast = 0;
1981
        /* In E1x we only take the RX accept flags into account, since TX
         * switching isn't enabled.
         */
1984         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
1985                 /* accept matched ucast */
1986                 drop_all_ucast = 0;
1987
1988         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
1989                 /* accept matched mcast */
1990                 drop_all_mcast = 0;
1991
1992         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
                /* accept all ucast */
1994                 drop_all_ucast = 0;
1995                 accp_all_ucast = 1;
1996         }
1997         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
1998                 /* accept all mcast */
1999                 drop_all_mcast = 0;
2000                 accp_all_mcast = 1;
2001         }
2002         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2003                 /* accept (all) bcast */
2004                 accp_all_bcast = 1;
2005         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2006                 /* accept unmatched unicasts */
2007                 unmatched_unicast = 1;
2008
2009         mac_filters->ucast_drop_all = drop_all_ucast ?
2010             mac_filters->ucast_drop_all | mask :
2011             mac_filters->ucast_drop_all & ~mask;
2012
2013         mac_filters->mcast_drop_all = drop_all_mcast ?
2014             mac_filters->mcast_drop_all | mask :
2015             mac_filters->mcast_drop_all & ~mask;
2016
2017         mac_filters->ucast_accept_all = accp_all_ucast ?
2018             mac_filters->ucast_accept_all | mask :
2019             mac_filters->ucast_accept_all & ~mask;
2020
2021         mac_filters->mcast_accept_all = accp_all_mcast ?
2022             mac_filters->mcast_accept_all | mask :
2023             mac_filters->mcast_accept_all & ~mask;
2024
2025         mac_filters->bcast_accept_all = accp_all_bcast ?
2026             mac_filters->bcast_accept_all | mask :
2027             mac_filters->bcast_accept_all & ~mask;
2028
2029         mac_filters->unmatched_unicast = unmatched_unicast ?
2030             mac_filters->unmatched_unicast | mask :
2031             mac_filters->unmatched_unicast & ~mask;
2032
        ECORE_MSG("drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x "
                  "accp_mcast 0x%x accp_bcast 0x%x",
2035                   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2036                   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2037                   mac_filters->bcast_accept_all);
2038
2039         /* write the MAC filter structure */
2040         __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2041
2042         /* The operation is completed */
2043         ECORE_CLEAR_BIT(p->state, p->pstate);
2044         ECORE_SMP_MB_AFTER_CLEAR_BIT();
2045
2046         return ECORE_SUCCESS;
2047 }
2048
2049 /* Setup ramrod data */
static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
                                           struct eth_classify_header *hdr,
                                           uint8_t rule_cnt)
2052 {
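        /* The echo field is returned in the completion event and lets the
         * handler match the completion back to this connection.
         */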
2053         hdr->echo = ECORE_CPU_TO_LE32(cid);
2054         hdr->rule_cnt = rule_cnt;
2055 }
2056
static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags,
                                           struct eth_filter_rules_cmd *cmd,
                                           int clear_accept_all)
2059 {
2060         uint16_t state;
2061
2062         /* start with 'drop-all' */
2063         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2064             ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2065
2066         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2067                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2068
2069         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2070                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2071
2072         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2073                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2074                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2075         }
2076
2077         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2078                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2079                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2080         }
2081         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2082                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2083
2084         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2085                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2086                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2087         }
2088         if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2089                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2090
2091         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2092         if (clear_accept_all) {
2093                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2094                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2095                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2096                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2097         }
2098
2099         cmd->state = ECORE_CPU_TO_LE16(state);
2100 }
2101
2102 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2103                                 struct ecore_rx_mode_ramrod_params *p)
2104 {
2105         struct eth_filter_rules_ramrod_data *data = p->rdata;
2106         int rc;
2107         uint8_t rule_idx = 0;
2108
2109         /* Reset the ramrod data buffer */
2110         ECORE_MEMSET(data, 0, sizeof(*data));
2111
2112         /* Setup ramrod data */
2113
2114         /* Tx (internal switching) */
2115         if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2116                 data->rules[rule_idx].client_id = p->cl_id;
2117                 data->rules[rule_idx].func_id = p->func_id;
2118
2119                 data->rules[rule_idx].cmd_general_data =
2120                     ETH_FILTER_RULES_CMD_TX_CMD;
2121
2122                 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2123                                                &(data->rules[rule_idx++]),
2124                                                FALSE);
2125         }
2126
2127         /* Rx */
2128         if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2129                 data->rules[rule_idx].client_id = p->cl_id;
2130                 data->rules[rule_idx].func_id = p->func_id;
2131
2132                 data->rules[rule_idx].cmd_general_data =
2133                     ETH_FILTER_RULES_CMD_RX_CMD;
2134
2135                 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2136                                                &(data->rules[rule_idx++]),
2137                                                FALSE);
2138         }
2139
        /* If FCoE queue configuration has been requested, configure the Rx and
         * internal switching modes for this queue in separate rules.
         *
         * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2144          * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2145          */
2146         if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2147                 /*  Tx (internal switching) */
2148                 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2149                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2150                         data->rules[rule_idx].func_id = p->func_id;
2151
2152                         data->rules[rule_idx].cmd_general_data =
2153                             ETH_FILTER_RULES_CMD_TX_CMD;
2154
2155                         ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2156                                                        &(data->rules
2157                                                          [rule_idx++]), TRUE);
2158                 }
2159
2160                 /* Rx */
2161                 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2162                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2163                         data->rules[rule_idx].func_id = p->func_id;
2164
2165                         data->rules[rule_idx].cmd_general_data =
2166                             ETH_FILTER_RULES_CMD_RX_CMD;
2167
2168                         ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2169                                                        &(data->rules
2170                                                          [rule_idx++]), TRUE);
2171                 }
2172         }
2173
2174         /* Set the ramrod header (most importantly - number of rules to
2175          * configure).
2176          */
2177         ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2178
2179         ECORE_MSG
2180             ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2181              data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2182
        /* No need for an explicit memory barrier here: we would in any case
         * have to order the write to the SPQ element against the update of
         * the SPQ producer, which involves a memory read, and the full memory
         * barrier needed for that is already placed inside ecore_sp_post().
         */
2189
2190         /* Send a ramrod */
2191         rc = ecore_sp_post(sc,
2192                            RAMROD_CMD_ID_ETH_FILTER_RULES,
2193                            p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2194         if (rc)
2195                 return rc;
2196
2197         /* Ramrod completion is pending */
2198         return ECORE_PENDING;
2199 }
2200
2201 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2202                                       struct ecore_rx_mode_ramrod_params *p)
2203 {
2204         return ecore_state_wait(sc, p->state, p->pstate);
2205 }
2206
2207 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
                                    __rte_unused struct ecore_rx_mode_ramrod_params *p)
2210 {
2211         /* Do nothing */
2212         return ECORE_SUCCESS;
2213 }
2214
2215 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2216                          struct ecore_rx_mode_ramrod_params *p)
2217 {
2218         int rc;
2219
2220         /* Configure the new classification in the chip */
2221         if (p->rx_mode_obj->config_rx_mode) {
2222                 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2223                 if (rc < 0)
2224                         return rc;
2225
2226                 /* Wait for a ramrod completion if was requested */
2227                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2228                         rc = p->rx_mode_obj->wait_comp(sc, p);
2229                         if (rc)
2230                                 return rc;
2231                 }
2232         } else {
                PMD_DRV_LOG(ERR, "config_rx_mode is NULL");
                return -1;
2235         }
2236
2237         return rc;
2238 }
2239
2240 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2241 {
2242         if (CHIP_IS_E1x(sc)) {
2243                 o->wait_comp = ecore_empty_rx_mode_wait;
2244                 o->config_rx_mode = ecore_set_rx_mode_e1x;
2245         } else {
2246                 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2247                 o->config_rx_mode = ecore_set_rx_mode_e2;
2248         }
2249 }
2250
2251 /********************* Multicast verbs: SET, CLEAR ****************************/
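/* A multicast MAC is mapped to one of 256 approximate-match bins: the bin
 * index is the most significant byte of the little-endian CRC32 of the MAC.
 */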
static uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2253 {
2254         return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2255 }
2256
2257 struct ecore_mcast_mac_elem {
2258         ecore_list_entry_t link;
2259         uint8_t mac[ETH_ALEN];
2260         uint8_t pad[2];         /* For a natural alignment of the following buffer */
2261 };
2262
2263 struct ecore_pending_mcast_cmd {
2264         ecore_list_entry_t link;
2265         int type;               /* ECORE_MCAST_CMD_X */
2266         union {
2267                 ecore_list_t macs_head;
2268                 uint32_t macs_num;      /* Needed for DEL command */
2269                 int next_bin;   /* Needed for RESTORE flow with aprox match */
        } data;

        int alloc_len;          /* size of the whole allocation, needed when
                                 * freeing this command with ECORE_FREE()
                                 */

        int done;               /* set to TRUE when the command has been handled.
                                 * In practice it is only used in the 57712
                                 * handling, where one pending command may be
                                 * handled in a few operations; on other chips
                                 * every command completes in a single ramrod,
                                 * so there is no need for this field there.
                                 */
};
2279
2280 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2281 {
2282         if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2283             o->raw.wait_comp(sc, &o->raw))
2284                 return ECORE_TIMEOUT;
2285
2286         return ECORE_SUCCESS;
2287 }
2288
2289 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2290                                    struct ecore_mcast_obj *o,
2291                                    struct ecore_mcast_ramrod_params *p,
2292                                    enum ecore_mcast_cmd cmd)
2293 {
2294         int total_sz;
2295         struct ecore_pending_mcast_cmd *new_cmd;
2296         struct ecore_mcast_mac_elem *cur_mac = NULL;
2297         struct ecore_mcast_list_elem *pos;
2298         int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2299                              p->mcast_list_len : 0);
2300
2301         /* If the command is empty ("handle pending commands only"), break */
2302         if (!p->mcast_list_len)
2303                 return ECORE_SUCCESS;
2304
2305         total_sz = sizeof(*new_cmd) +
2306             macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2307
2308         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2309         new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2310
        if (!new_cmd)
                return ECORE_NOMEM;

        /* Remember the allocation size so it can be freed correctly later */
        new_cmd->alloc_len = total_sz;

2314         ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d",
2315                   cmd, macs_list_len);
2316
2317         ECORE_LIST_INIT(&new_cmd->data.macs_head);
2318
2319         new_cmd->type = cmd;
2320         new_cmd->done = FALSE;
2321
2322         switch (cmd) {
2323         case ECORE_MCAST_CMD_ADD:
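                /* The MAC elements live in the same allocation, immediately
                 * after the command structure itself.
                 */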
2324                 cur_mac = (struct ecore_mcast_mac_elem *)
2325                     ((uint8_t *) new_cmd + sizeof(*new_cmd));
2326
2327                 /* Push the MACs of the current command into the pending command
2328                  * MACs list: FIFO
2329                  */
2330                 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2331                                           struct ecore_mcast_list_elem) {
2332                         ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2333                         ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2334                                              &new_cmd->data.macs_head);
2335                         cur_mac++;
2336                 }
2337
2338                 break;
2339
2340         case ECORE_MCAST_CMD_DEL:
2341                 new_cmd->data.macs_num = p->mcast_list_len;
2342                 break;
2343
2344         case ECORE_MCAST_CMD_RESTORE:
2345                 new_cmd->data.next_bin = 0;
2346                 break;
2347
2348         default:
2349                 ECORE_FREE(sc, new_cmd, total_sz);
2350                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2351                 return ECORE_INVAL;
2352         }
2353
2354         /* Push the new pending command to the tail of the pending list: FIFO */
2355         ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2356
2357         o->set_sched(o);
2358
2359         return ECORE_PENDING;
2360 }
2361
2362 /**
2363  * ecore_mcast_get_next_bin - get the next set bin (index)
2364  *
 * @o:          multicast object holding the bin registry
 * @last:       index to start looking from (inclusive)
2367  *
2368  * returns the next found (set) bin or a negative value if none is found.
2369  */
2370 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2371 {
2372         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2373
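        /* Scan the 64-bit words of the approximate-match vector, starting
         * inside the word that contains 'last'.
         */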
2374         for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2375                 if (o->registry.aprox_match.vec[i])
2376                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2377                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2378                                 if (BIT_VEC64_TEST_BIT
2379                                     (o->registry.aprox_match.vec, cur_bit)) {
2380                                         return cur_bit;
2381                                 }
2382                         }
2383                 inner_start = 0;
2384         }
2385
2386         /* None found */
2387         return -1;
2388 }
2389
2390 /**
2391  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2392  *
 * @o:          multicast object holding the bin registry
2394  *
2395  * returns the index of the found bin or -1 if none is found
2396  */
2397 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2398 {
2399         int cur_bit = ecore_mcast_get_next_bin(o, 0);
2400
2401         if (cur_bit >= 0)
2402                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2403
2404         return cur_bit;
2405 }
2406
2407 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2408 {
2409         struct ecore_raw_obj *raw = &o->raw;
2410         uint8_t rx_tx_flag = 0;
2411
2412         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2413             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2414                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2415
2416         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2417             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2418                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2419
2420         return rx_tx_flag;
2421 }
2422
2423 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2424                                         struct ecore_mcast_obj *o, int idx,
2425                                         union ecore_mcast_config_data *cfg_data,
2426                                         enum ecore_mcast_cmd cmd)
2427 {
2428         struct ecore_raw_obj *r = &o->raw;
2429         struct eth_multicast_rules_ramrod_data *data =
2430             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2431         uint8_t func_id = r->func_id;
2432         uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2433         int bin;
2434
2435         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2436                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2437
2438         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2439
2440         /* Get a bin and update a bins' vector */
2441         switch (cmd) {
2442         case ECORE_MCAST_CMD_ADD:
2443                 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2444                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2445                 break;
2446
2447         case ECORE_MCAST_CMD_DEL:
2448                 /* If there were no more bins to clear
2449                  * (ecore_mcast_clear_first_bin() returns -1) then we would
2450                  * clear any (0xff) bin.
                 * See ecore_mcast_validate_e2() for an explanation of when
                 * this may happen.
2453                  */
2454                 bin = ecore_mcast_clear_first_bin(o);
2455                 break;
2456
2457         case ECORE_MCAST_CMD_RESTORE:
2458                 bin = cfg_data->bin;
2459                 break;
2460
2461         default:
2462                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2463                 return;
2464         }
2465
2466         ECORE_MSG("%s bin %d",
2467                   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2468                    "Setting" : "Clearing"), bin);
2469
2470         data->rules[idx].bin_id = (uint8_t) bin;
2471         data->rules[idx].func_id = func_id;
2472         data->rules[idx].engine_id = o->engine_id;
2473 }
2474
2475 /**
2476  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2477  *
2478  * @sc:         device handle
 * @o:          multicast object to restore the configuration from
 * @start_bin:  index in the registry to start from (inclusive)
2481  * @rdata_idx:  index in the ramrod data to start from
2482  *
2483  * returns last handled bin index or -1 if all bins have been handled
2484  */
2485 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2486                                              struct ecore_mcast_obj *o,
2487                                              int start_bin, int *rdata_idx)
2488 {
2489         int cur_bin, cnt = *rdata_idx;
2490         union ecore_mcast_config_data cfg_data = { NULL };
2491
2492         /* go through the registry and configure the bins from it */
2493         for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2494              cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2495
2496                 cfg_data.bin = (uint8_t) cur_bin;
2497                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2498
2499                 cnt++;
2500
2501                 ECORE_MSG("About to configure a bin %d", cur_bin);
2502
2503                 /* Break if we reached the maximum number
2504                  * of rules.
2505                  */
2506                 if (cnt >= o->max_cmd_len)
2507                         break;
2508         }
2509
2510         *rdata_idx = cnt;
2511
2512         return cur_bin;
2513 }
2514
2515 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2516                                            struct ecore_mcast_obj *o,
                                           struct ecore_pending_mcast_cmd *cmd_pos,
                                           int *line_idx)
2519 {
2520         struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2521         int cnt = *line_idx;
2522         union ecore_mcast_config_data cfg_data = { NULL };
2523
2524         ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2525                                        &cmd_pos->data.macs_head, link,
2526                                        struct ecore_mcast_mac_elem) {
2527
2528                 cfg_data.mac = &pmac_pos->mac[0];
2529                 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2530
2531                 cnt++;
2532
2533                 ECORE_MSG
2534                     ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2535                      pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2536                      pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2537
2538                 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2539                                         &cmd_pos->data.macs_head);
2540
2541                 /* Break if we reached the maximum number
2542                  * of rules.
2543                  */
2544                 if (cnt >= o->max_cmd_len)
2545                         break;
2546         }
2547
2548         *line_idx = cnt;
2549
2550         /* if no more MACs to configure - we are done */
2551         if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2552                 cmd_pos->done = TRUE;
2553 }
2554
2555 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2556                                            struct ecore_mcast_obj *o,
                                           struct ecore_pending_mcast_cmd *cmd_pos,
                                           int *line_idx)
2559 {
2560         int cnt = *line_idx;
2561
2562         while (cmd_pos->data.macs_num) {
2563                 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2564
2565                 cnt++;
2566
2567                 cmd_pos->data.macs_num--;
2568
                ECORE_MSG("Deleting MAC. %d left, cnt is %d",
2570                           cmd_pos->data.macs_num, cnt);
2571
2572                 /* Break if we reached the maximum
2573                  * number of rules.
2574                  */
2575                 if (cnt >= o->max_cmd_len)
2576                         break;
2577         }
2578
2579         *line_idx = cnt;
2580
2581         /* If we cleared all bins - we are done */
2582         if (!cmd_pos->data.macs_num)
2583                 cmd_pos->done = TRUE;
2584 }
2585
2586 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
                                               struct ecore_mcast_obj *o,
                                               struct ecore_pending_mcast_cmd *cmd_pos,
                                               int *line_idx)
2590 {
2591         cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2592                                                 line_idx);
2593
2594         if (cmd_pos->data.next_bin < 0)
2595                 /* If o->set_restore returned -1 we are done */
2596                 cmd_pos->done = TRUE;
2597         else
2598                 /* Start from the next bin next time */
2599                 cmd_pos->data.next_bin++;
2600 }
2601
static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc,
                                              struct ecore_mcast_ramrod_params *p)
2605 {
2606         struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2607         int cnt = 0;
2608         struct ecore_mcast_obj *o = p->mcast_obj;
2609
2610         ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2611                                        &o->pending_cmds_head, link,
2612                                        struct ecore_pending_mcast_cmd) {
2613                 switch (cmd_pos->type) {
2614                 case ECORE_MCAST_CMD_ADD:
2615                         ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2616                         break;
2617
2618                 case ECORE_MCAST_CMD_DEL:
2619                         ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2620                         break;
2621
2622                 case ECORE_MCAST_CMD_RESTORE:
2623                         ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2624                                                            &cnt);
2625                         break;
2626
2627                 default:
2628                         PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type);
2629                         return ECORE_INVAL;
2630                 }
2631
2632                 /* If the command has been completed - remove it from the list
2633                  * and free the memory
2634                  */
2635                 if (cmd_pos->done) {
2636                         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2637                                                 &o->pending_cmds_head);
2638                         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2639                 }
2640
2641                 /* Break if we reached the maximum number of rules */
2642                 if (cnt >= o->max_cmd_len)
2643                         break;
2644         }
2645
2646         return cnt;
2647 }
2648
2649 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2650                                 struct ecore_mcast_obj *o,
2651                                 struct ecore_mcast_ramrod_params *p,
2652                                 int *line_idx)
2653 {
2654         struct ecore_mcast_list_elem *mlist_pos;
2655         union ecore_mcast_config_data cfg_data = { NULL };
2656         int cnt = *line_idx;
2657
2658         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2659                                   struct ecore_mcast_list_elem) {
2660                 cfg_data.mac = mlist_pos->mac;
2661                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2662
2663                 cnt++;
2664
2665                 ECORE_MSG
2666                     ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2667                      mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2668                      mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2669         }
2670
2671         *line_idx = cnt;
2672 }
2673
2674 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2675                                 struct ecore_mcast_obj *o,
2676                                 struct ecore_mcast_ramrod_params *p,
2677                                 int *line_idx)
2678 {
2679         int cnt = *line_idx, i;
2680
2681         for (i = 0; i < p->mcast_list_len; i++) {
2682                 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2683
2684                 cnt++;
2685
2686                 ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1);
2687         }
2688
2689         *line_idx = cnt;
2690 }
2691
2692 /**
 * ecore_mcast_handle_current_cmd - handle the current (non-pending) command
 *
 * @sc:         device handle
 * @p:          multicast ramrod parameters
 * @cmd:        command to handle (ADD/DEL/RESTORE)
 * @start_cnt:  first line in the ramrod data that may be used
 *
 * This function is called only if there is enough room for the current
 * command in the ramrod data.
2702  * Returns number of lines filled in the ramrod data in total.
2703  */
static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc,
                                          struct ecore_mcast_ramrod_params *p,
2706                                           enum ecore_mcast_cmd cmd,
2707                                           int start_cnt)
2708 {
2709         struct ecore_mcast_obj *o = p->mcast_obj;
2710         int cnt = start_cnt;
2711
2712         ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len);
2713
2714         switch (cmd) {
2715         case ECORE_MCAST_CMD_ADD:
2716                 ecore_mcast_hdl_add(sc, o, p, &cnt);
2717                 break;
2718
2719         case ECORE_MCAST_CMD_DEL:
2720                 ecore_mcast_hdl_del(sc, o, p, &cnt);
2721                 break;
2722
2723         case ECORE_MCAST_CMD_RESTORE:
2724                 o->hdl_restore(sc, o, 0, &cnt);
2725                 break;
2726
2727         default:
2728                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2729                 return ECORE_INVAL;
2730         }
2731
2732         /* The current command has been handled */
2733         p->mcast_list_len = 0;
2734
2735         return cnt;
2736 }
2737
2738 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2739                                    struct ecore_mcast_ramrod_params *p,
2740                                    enum ecore_mcast_cmd cmd)
2741 {
2742         struct ecore_mcast_obj *o = p->mcast_obj;
2743         int reg_sz = o->get_registry_size(o);
2744
2745         switch (cmd) {
2746                 /* DEL command deletes all currently configured MACs */
2747         case ECORE_MCAST_CMD_DEL:
2748                 o->set_registry_size(o, 0);
2749                 /* Don't break: fall through to RESTORE handling */
2750
2751                 /* RESTORE command will restore the entire multicast configuration */
2752         case ECORE_MCAST_CMD_RESTORE:
2753                 /* Here we set the approximate amount of work to do, which
2754                  * may in fact turn out to be less: some MACs in postponed
2755                  * ADD command(s) scheduled before this command may fall
2756                  * into the same bin, so the actual number of bins set in
2757                  * the registry would be less than we estimated here. See
2758                  * ecore_mcast_set_one_rule_e2() for further details.
2759                  */
2760                 p->mcast_list_len = reg_sz;
2761                 break;
2762
2763         case ECORE_MCAST_CMD_ADD:
2764         case ECORE_MCAST_CMD_CONT:
2765                 /* Here we assume that all new MACs will fall into new bins.
2766                  * However we will correct the real registry size after we
2767                  * handle all pending commands.
2768                  */
2769                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2770                 break;
2771
2772         default:
2773                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2774                 return ECORE_INVAL;
2775         }
2776
2777         /* Increase the total number of MACs pending to be configured */
2778         o->total_pending_num += p->mcast_list_len;
2779
2780         return ECORE_SUCCESS;
2781 }
2782
2783 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2784                                   struct ecore_mcast_ramrod_params *p,
2785                                   int old_num_bins)
2786 {
2787         struct ecore_mcast_obj *o = p->mcast_obj;
2788
2789         o->set_registry_size(o, old_num_bins);
2790         o->total_pending_num -= p->mcast_list_len;
2791 }
2792
2793 /**
2794  * ecore_mcast_set_rdata_hdr_e2 - set the ramrod data header values
2795  *
2796  * @sc:         device handle
2797  * @p:          multicast ramrod parameters
2798  * @len:        number of rules to handle
2799  */
2800 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc *sc,
2801                                          struct ecore_mcast_ramrod_params *p,
2802                                          uint8_t len)
2803 {
2804         struct ecore_raw_obj *r = &p->mcast_obj->raw;
2805         struct eth_multicast_rules_ramrod_data *data =
2806             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2807
2808         data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2809                                               (ECORE_FILTER_MCAST_PENDING <<
2810                                                ECORE_SWCID_SHIFT));
2811         data->header.rule_cnt = len;
2812 }
2813
2814 /**
2815  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2816  *
2817  * @o:          multicast object
2818  *
2819  * Recalculate the actual number of set bins in the registry using Brian
2820  * Kernighan's algorithm: its execution cost is proportional to the number
2821  * of set bins.
2822  */
2823 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2824 {
2825         int i, cnt = 0;
2826         uint64_t elem;
2827
2828         for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2829                 elem = o->registry.aprox_match.vec[i];
2830                 for (; elem; cnt++)
2831                         elem &= elem - 1;
2832         }
2833
2834         o->set_registry_size(o, cnt);
2835
2836         return ECORE_SUCCESS;
2837 }
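
/* Illustrative sketch (examples only, compiled out): the Kernighan trick
 * used above. Each `x &= x - 1` clears the lowest set bit, so the loop
 * body runs exactly once per set bit. The guard macro is hypothetical.
 */
#ifdef ECORE_SP_EXAMPLES
static int ecore_example_popcount64(uint64_t x)
{
        int cnt = 0;

        while (x) {
                x &= x - 1;     /* clear the least significant set bit */
                cnt++;
        }

        return cnt;     /* e.g. x = 0xb0 (bits 4, 5 and 7 set) yields 3 */
}
#endif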
2838
2839 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2840                                 struct ecore_mcast_ramrod_params *p,
2841                                 enum ecore_mcast_cmd cmd)
2842 {
2843         struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2844         struct ecore_mcast_obj *o = p->mcast_obj;
2845         struct eth_multicast_rules_ramrod_data *data =
2846             (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2847         int cnt = 0, rc;
2848
2849         /* Reset the ramrod data buffer */
2850         ECORE_MEMSET(data, 0, sizeof(*data));
2851
2852         cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2853
2854         /* If there are no more pending commands - clear SCHEDULED state */
2855         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2856                 o->clear_sched(o);
2857
2858         /* The condition below may be TRUE iff there was enough room in
2859          * the ramrod data for all pending commands and for the current
2860          * command. Otherwise the current command would have been added
2861          * to the pending commands and p->mcast_list_len would have been
2862          * zeroed.
2863          */
2864         if (p->mcast_list_len > 0)
2865                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2866
2867         /* We've pulled out some MACs - update the total number of
2868          * outstanding.
2869          */
2870         o->total_pending_num -= cnt;
2871
2872         /* send a ramrod */
2873         ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2874         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2875
2876         ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2877
2878         /* Update a registry size if there are no more pending operations.
2879          *
2880          * We don't want to change the value of the registry size if there are
2881          * pending operations because we want it to always be equal to the
2882          * exact or the approximate number (see ecore_mcast_validate_e2()) of
2883          * set bins after the last requested operation in order to properly
2884          * evaluate the size of the next DEL/RESTORE operation.
2885          *
2886          * Note that we update the registry itself during command(s) handling
2887          * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2888          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2889          * with a limited amount of update commands (per MAC/bin) and we don't
2890          * know in this scope what the actual state of bins configuration is
2891          * going to be after this ramrod.
2892          */
2893         if (!o->total_pending_num)
2894                 ecore_mcast_refresh_registry_e2(o);
2895
2896         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2897          * RAMROD_PENDING status immediately.
2898          */
2899         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2900                 raw->clear_pending(raw);
2901                 return ECORE_SUCCESS;
2902         } else {
2903                 /* No need for an explicit memory barrier here as long as we would
2904                  * need to ensure the ordering of writing to the SPQ element
2905                  * and updating of the SPQ producer which involves a memory
2906                  * read and we will have to put a full memory barrier there
2907                  * (inside ecore_sp_post()).
2908                  */
2909
2910                 /* Send a ramrod */
2911                 rc = ecore_sp_post(sc,
2912                                    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2913                                    raw->cid,
2914                                    raw->rdata_mapping, ETH_CONNECTION_TYPE);
2915                 if (rc)
2916                         return rc;
2917
2918                 /* Ramrod completion is pending */
2919                 return ECORE_PENDING;
2920         }
2921 }
2922
2923 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2924                                     struct ecore_mcast_ramrod_params *p,
2925                                     enum ecore_mcast_cmd cmd)
2926 {
2927         /* Mark, that there is a work to do */
2928         if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2929                 p->mcast_list_len = 1;
2930
2931         return ECORE_SUCCESS;
2932 }
2933
2934 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2935                                    __rte_unused struct ecore_mcast_ramrod_params
2936                                    *p, __rte_unused int old_num_bins)
2937 {
2938         /* Do nothing */
2939 }
2940
2941 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2942 do { \
2943         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2944 } while (0)
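
/* Illustrative sketch (examples only, compiled out): how the macro above
 * addresses the uint32_t filter array. Bin 37 lands in word 37 >> 5 == 1,
 * at bit 37 & 0x1f == 5. The guard macro is hypothetical.
 */
#ifdef ECORE_SP_EXAMPLES
static void ecore_example_set_mc_filter(void)
{
        uint32_t filter[ECORE_MC_HASH_SIZE] = { 0 };

        ECORE_57711_SET_MC_FILTER(filter, 37);
        /* filter[1] now equals (1 << 5) */
}
#endif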
2945
2946 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2947                                     struct ecore_mcast_obj *o,
2948                                     struct ecore_mcast_ramrod_params *p,
2949                                     uint32_t *mc_filter)
2950 {
2951         struct ecore_mcast_list_elem *mlist_pos;
2952         int bit;
2953
2954         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2955                                   struct ecore_mcast_list_elem) {
2956                 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2957                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2958
2959                 ECORE_MSG
2960                     ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2961                      mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2962                      mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2963                      bit);
2964
2965                 /* bookkeeping... */
2966                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2967         }
2968 }
2969
2970 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
2971                                         __rte_unused,
2972                                         struct ecore_mcast_obj *o,
2973                                         uint32_t *mc_filter)
2974 {
2975         int bit;
2976
2977         for (bit = ecore_mcast_get_next_bin(o, 0);
2978              bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2979                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2980                 ECORE_MSG("About to set bin %d", bit);
2981         }
2982 }
2983
2984 /* On 57711 we write the multicast MACs' approximate match
2985  * table directly into the TSTORM's internal RAM, so we don't
2986  * need any tricks to make it work.
2987  */
2988 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2989                                  struct ecore_mcast_ramrod_params *p,
2990                                  enum ecore_mcast_cmd cmd)
2991 {
2992         int i;
2993         struct ecore_mcast_obj *o = p->mcast_obj;
2994         struct ecore_raw_obj *r = &o->raw;
2995
2996         /* If CLEAR_ONLY has been requested - skip the HW update
2997          * and only clear the registry (pending is cleared below).
2998          */
2999         if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3000                 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
3001
3002                 /* Set the multicast filter bits before writing it into
3003                  * the internal memory.
3004                  */
3005                 switch (cmd) {
3006                 case ECORE_MCAST_CMD_ADD:
3007                         ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3008                         break;
3009
3010                 case ECORE_MCAST_CMD_DEL:
3011                         ECORE_MSG
3012                             ("Invalidating multicast MACs configuration");
3013
3014                         /* clear the registry */
3015                         ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3016                                      sizeof(o->registry.aprox_match.vec));
3017                         break;
3018
3019                 case ECORE_MCAST_CMD_RESTORE:
3020                         ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
3021                         break;
3022
3023                 default:
3024                         PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
3025                         return ECORE_INVAL;
3026                 }
3027
3028                 /* Set the mcast filter in the internal memory */
3029                 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3030                         REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3031         } else
3032                 /* clear the registry */
3033                 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3034                              sizeof(o->registry.aprox_match.vec));
3035
3036         /* We are done */
3037         r->clear_pending(r);
3038
3039         return ECORE_SUCCESS;
3040 }
3041
3042 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3043 {
3044         return o->registry.aprox_match.num_bins_set;
3045 }
3046
3047 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3048                                                 int n)
3049 {
3050         o->registry.aprox_match.num_bins_set = n;
3051 }
3052
3053 int ecore_config_mcast(struct bnx2x_softc *sc,
3054                        struct ecore_mcast_ramrod_params *p,
3055                        enum ecore_mcast_cmd cmd)
3056 {
3057         struct ecore_mcast_obj *o = p->mcast_obj;
3058         struct ecore_raw_obj *r = &o->raw;
3059         int rc = 0, old_reg_size;
3060
3061         /* This is needed to recover the number of currently configured
3062          * mcast macs in case of failure.
3063          */
3064         old_reg_size = o->get_registry_size(o);
3065
3066         /* Do some calculations and checks */
3067         rc = o->validate(sc, p, cmd);
3068         if (rc)
3069                 return rc;
3070
3071         /* Return if there is no work to do */
3072         if ((!p->mcast_list_len) && (!o->check_sched(o)))
3073                 return ECORE_SUCCESS;
3074
3075         ECORE_MSG
3076             ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3077              o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3078
3079         /* Enqueue the current command to the pending list if we can't complete
3080          * it in the current iteration
3081          */
3082         if (r->check_pending(r) ||
3083             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3084                 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3085                 if (rc < 0)
3086                         goto error_exit1;
3087
3088                 /* As long as the current command is in a command list we
3089                  * don't need to handle it separately.
3090                  */
3091                 p->mcast_list_len = 0;
3092         }
3093
3094         if (!r->check_pending(r)) {
3095
3096                 /* Set 'pending' state */
3097                 r->set_pending(r);
3098
3099                 /* Configure the new classification in the chip */
3100                 rc = o->config_mcast(sc, p, cmd);
3101                 if (rc < 0)
3102                         goto error_exit2;
3103
3104                 /* Wait for a ramrod completion if it was requested */
3105                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3106                         rc = o->wait_comp(sc, o);
3107         }
3108
3109         return rc;
3110
3111 error_exit2:
3112         r->clear_pending(r);
3113
3114 error_exit1:
3115         o->revert(sc, p, old_reg_size);
3116
3117         return rc;
3118 }
3119
3120 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3121 {
3122         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3123         ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3124         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3125 }
3126
3127 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3128 {
3129         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3130         ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3131         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3132 }
3133
3134 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3135 {
3136         return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3137 }
3138
3139 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3140 {
3141         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3142 }
3143
3144 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3145                           struct ecore_mcast_obj *mcast_obj,
3146                           uint8_t mcast_cl_id, uint32_t mcast_cid,
3147                           uint8_t func_id, uint8_t engine_id, void *rdata,
3148                           ecore_dma_addr_t rdata_mapping, int state,
3149                           unsigned long *pstate, ecore_obj_type type)
3150 {
3151         ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3152
3153         ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3154                            rdata, rdata_mapping, state, pstate, type);
3155
3156         mcast_obj->engine_id = engine_id;
3157
3158         ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3159
3160         mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3161         mcast_obj->check_sched = ecore_mcast_check_sched;
3162         mcast_obj->set_sched = ecore_mcast_set_sched;
3163         mcast_obj->clear_sched = ecore_mcast_clear_sched;
3164
3165         if (CHIP_IS_E1H(sc)) {
3166                 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3167                 mcast_obj->enqueue_cmd = NULL;
3168                 mcast_obj->hdl_restore = NULL;
3169                 mcast_obj->check_pending = ecore_mcast_check_pending;
3170
3171                 /* 57711 doesn't send a ramrod, so it has unlimited credit
3172                  * for one command.
3173                  */
3174                 mcast_obj->max_cmd_len = -1;
3175                 mcast_obj->wait_comp = ecore_mcast_wait;
3176                 mcast_obj->set_one_rule = NULL;
3177                 mcast_obj->validate = ecore_mcast_validate_e1h;
3178                 mcast_obj->revert = ecore_mcast_revert_e1h;
3179                 mcast_obj->get_registry_size =
3180                     ecore_mcast_get_registry_size_aprox;
3181                 mcast_obj->set_registry_size =
3182                     ecore_mcast_set_registry_size_aprox;
3183         } else {
3184                 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3185                 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3186                 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3187                 mcast_obj->check_pending = ecore_mcast_check_pending;
3188                 mcast_obj->max_cmd_len = 16;
3189                 mcast_obj->wait_comp = ecore_mcast_wait;
3190                 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3191                 mcast_obj->validate = ecore_mcast_validate_e2;
3192                 mcast_obj->revert = ecore_mcast_revert_e2;
3193                 mcast_obj->get_registry_size =
3194                     ecore_mcast_get_registry_size_aprox;
3195                 mcast_obj->set_registry_size =
3196                     ecore_mcast_set_registry_size_aprox;
3197         }
3198 }
3199
3200 /*************************** Credit handling **********************************/
3201
3202 /**
3203  * __atomic_add_ifless - add if the result is less than a given value.
3204  *
3205  * @v:  pointer of type ecore_atomic_t
3206  * @a:  the amount to add to v...
3207  * @u:  ...if (v + a) is less than u.
3208  *
3209  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3210  *
3211  */
3212 static int __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
3213 {
3214         int c, old;
3215
3216         c = ECORE_ATOMIC_READ(v);
3217         for (;;) {
3218                 if (ECORE_UNLIKELY(c + a >= u))
3219                         return FALSE;
3220
3221                 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3222                 if (ECORE_LIKELY(old == c))
3223                         break;
3224                 c = old;
3225         }
3226
3227         return TRUE;
3228 }
3229
3230 /**
3231  * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3232  *
3233  * @v:  pointer of type ecore_atomic_t
3234  * @a:  the amount to subtract from v...
3235  * @u:  ...if (v - a) is greater than or equal to u.
3236  *
3237  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
3238  * otherwise.
3239  */
3240 static int __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
3241 {
3242         int c, old;
3243
3244         c = ECORE_ATOMIC_READ(v);
3245         for (;;) {
3246                 if (ECORE_UNLIKELY(c - a < u))
3247                         return FALSE;
3248
3249                 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3250                 if (ECORE_LIKELY(old == c))
3251                         break;
3252                 c = old;
3253         }
3254
3255         return TRUE;
3256 }
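
/* Illustrative sketch (examples only, compiled out): the two CAS loops
 * above implement "take a credits if at least u remain afterwards" and
 * "return a credits unless the total would reach u". The guard macro is
 * hypothetical.
 */
#ifdef ECORE_SP_EXAMPLES
static void ecore_example_atomic_helpers(void)
{
        ecore_atomic_t v;

        ECORE_ATOMIC_SET(&v, 8);

        __atomic_dec_ifmoe(&v, 3, 0);   /* TRUE: 8 - 3 >= 0, v becomes 5 */
        __atomic_add_ifless(&v, 10, 9); /* FALSE: 5 + 10 is not < 9 */
}
#endif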
3257
3258 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3259 {
3260         int rc;
3261
3262         ECORE_SMP_MB();
3263         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3264         ECORE_SMP_MB();
3265
3266         return rc;
3267 }
3268
3269 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3270 {
3271         int rc;
3272
3273         ECORE_SMP_MB();
3274
3275         /* Don't allow refilling if credit + cnt > pool_sz */
3276         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3277
3278         ECORE_SMP_MB();
3279
3280         return rc;
3281 }
3282
3283 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3284 {
3285         int cur_credit;
3286
3287         ECORE_SMP_MB();
3288         cur_credit = ECORE_ATOMIC_READ(&o->credit);
3289
3290         return cur_credit;
3291 }
3292
3293 static int ecore_credit_pool_always_TRUE(__rte_unused struct
3294                                          ecore_credit_pool_obj *o,
3295                                          __rte_unused int cnt)
3296 {
3297         return TRUE;
3298 }
3299
3300 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3301                                        int *offset)
3302 {
3303         int idx, vec, i;
3304
3305         *offset = -1;
3306
3307         /* Find "internal cam-offset" then add to base for this object... */
3308         for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3309
3310                 /* Skip the current vector if there are no free entries in it */
3311                 if (!o->pool_mirror[vec])
3312                         continue;
3313
3314                 /* If we've got here we are going to find a free entry */
3315                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3316                      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3317
3318                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3319                                 /* Got one!! */
3320                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3321                                 *offset = o->base_pool_offset + idx;
3322                                 return TRUE;
3323                         }
3324         }
3325
3326         return FALSE;
3327 }
3328
3329 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3330                                        int offset)
3331 {
3332         if (offset < o->base_pool_offset)
3333                 return FALSE;
3334
3335         offset -= o->base_pool_offset;
3336
3337         if (offset >= o->pool_sz)
3338                 return FALSE;
3339
3340         /* Return the entry to the pool */
3341         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3342
3343         return TRUE;
3344 }
3345
3346 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3347                                                    ecore_credit_pool_obj *o,
3348                                                    __rte_unused int offset)
3349 {
3350         return TRUE;
3351 }
3352
3353 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3354                                                    ecore_credit_pool_obj *o,
3355                                                    __rte_unused int *offset)
3356 {
3357         *offset = -1;
3358         return TRUE;
3359 }
3360
3361 /**
3362  * ecore_init_credit_pool - initialize credit pool internals.
3363  *
3364  * @p:          credit pool object
3365  * @base:       Base entry in the CAM to use.
3366  * @credit:     pool size.
3367  *
3368  * If base is negative, no CAM entry handling will be performed.
3369  * If credit is negative, pool operations will always succeed (unlimited pool).
3370  *
3371  */
3372 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3373                                    int base, int credit)
3374 {
3375         /* Zero the object first */
3376         ECORE_MEMSET(p, 0, sizeof(*p));
3377
3378         /* Set the table to all 1s */
3379         ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3380
3381         /* Init a pool as full */
3382         ECORE_ATOMIC_SET(&p->credit, credit);
3383
3384         /* The total pool size */
3385         p->pool_sz = credit;
3386
3387         p->base_pool_offset = base;
3388
3389         /* Commit the change */
3390         ECORE_SMP_MB();
3391
3392         p->check = ecore_credit_pool_check;
3393
3394         /* if pool credit is negative - disable the checks */
3395         if (credit >= 0) {
3396                 p->put = ecore_credit_pool_put;
3397                 p->get = ecore_credit_pool_get;
3398                 p->put_entry = ecore_credit_pool_put_entry;
3399                 p->get_entry = ecore_credit_pool_get_entry;
3400         } else {
3401                 p->put = ecore_credit_pool_always_TRUE;
3402                 p->get = ecore_credit_pool_always_TRUE;
3403                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3404                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3405         }
3406
3407         /* If base is negative - disable entries handling */
3408         if (base < 0) {
3409                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3410                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3411         }
3412 }
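
/* Illustrative sketch (examples only, compiled out): the three pool
 * flavors selected above by the signs of base and credit. The guard
 * macro is hypothetical.
 */
#ifdef ECORE_SP_EXAMPLES
static void ecore_example_pool_flavors(struct ecore_credit_pool_obj *p)
{
        ecore_init_credit_pool(p, 0, 16);  /* 16 credits, CAM entries 0..15 */
        ecore_init_credit_pool(p, -1, 16); /* 16 credits, no CAM handling */
        ecore_init_credit_pool(p, 0, -1);  /* unlimited: get/put always TRUE */
}
#endif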
3413
3414 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3415                                 struct ecore_credit_pool_obj *p,
3416                                 uint8_t func_id, uint8_t func_num)
3417 {
3418
3419 #define ECORE_CAM_SIZE_EMUL 5
3420
3421         int cam_sz;
3422
3423         if (CHIP_IS_E1H(sc)) {
3424                 /* CAM credit is equally divided between all active functions
3425                  * on the PORT.
3426                  */
3427                 if (func_num > 0) {
3428                         if (!CHIP_REV_IS_SLOW(sc))
3429                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3430                         else
3431                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3432                         ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3433                 } else {
3434                         /* this should never happen! Block MAC operations. */
3435                         ecore_init_credit_pool(p, 0, 0);
3436                 }
3437
3438         } else {
3439
3440                 /*
3441                  * CAM credit is equally divided between all active functions
3442                  * on the PATH.
3443                  */
3444                 if (func_num > 0) {
3445                         if (!CHIP_REV_IS_SLOW(sc))
3446                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3447                         else
3448                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3449
3450                         /* No need for CAM entries handling for 57712 and
3451                          * newer.
3452                          */
3453                         ecore_init_credit_pool(p, -1, cam_sz);
3454                 } else {
3455                         /* this should never happen! Block MAC operations. */
3456                         ecore_init_credit_pool(p, 0, 0);
3457                 }
3458         }
3459 }
3460
3461 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3462                                  struct ecore_credit_pool_obj *p,
3463                                  uint8_t func_id, uint8_t func_num)
3464 {
3465         if (CHIP_IS_E1x(sc)) {
3466                 /* There is no VLAN credit in HW on 57711; only
3467                  * MAC / MAC-VLAN filters can be set.
3468                  */
3469                 ecore_init_credit_pool(p, 0, -1);
3470         } else {
3471                 /* CAM credit is equally divided between all active functions
3472                  * on the PATH.
3473                  */
3474                 if (func_num > 0) {
3475                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
3476                         ecore_init_credit_pool(p, func_id * credit, credit);
3477                 } else
3478                         /* this should never happen! Block VLAN operations. */
3479                         ecore_init_credit_pool(p, 0, 0);
3480         }
3481 }
3482
3483 /****************** RSS Configuration ******************/
3484
3485 /**
3486  * ecore_setup_rss - configure RSS
3487  *
3488  * @sc:         device handle
3489  * @p:          rss configuration
3490  *
3491  * Sends an RSS UPDATE ramrod.
3492  */
3493 static int ecore_setup_rss(struct bnx2x_softc *sc,
3494                            struct ecore_config_rss_params *p)
3495 {
3496         struct ecore_rss_config_obj *o = p->rss_obj;
3497         struct ecore_raw_obj *r = &o->raw;
3498         struct eth_rss_update_ramrod_data *data =
3499             (struct eth_rss_update_ramrod_data *)(r->rdata);
3500         uint8_t rss_mode = 0;
3501         int rc;
3502
3503         ECORE_MEMSET(data, 0, sizeof(*data));
3504
3505         ECORE_MSG("Configuring RSS");
3506
3507         /* Set an echo field */
3508         data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3509                                        (r->state << ECORE_SWCID_SHIFT));
3510
3511         /* RSS mode */
3512         if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3513                 rss_mode = ETH_RSS_MODE_DISABLED;
3514         else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3515                 rss_mode = ETH_RSS_MODE_REGULAR;
3516
3517         data->rss_mode = rss_mode;
3518
3519         ECORE_MSG("rss_mode=%d", rss_mode);
3520
3521         /* RSS capabilities */
3522         if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3523                 data->capabilities |=
3524                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3525
3526         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3527                 data->capabilities |=
3528                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3529
3530         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3531                 data->capabilities |=
3532                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3533
3534         if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3535                 data->capabilities |=
3536                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3537
3538         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3539                 data->capabilities |=
3540                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3541
3542         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3543                 data->capabilities |=
3544                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3545
3546         if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3547                 data->udp_4tuple_dst_port_mask =
3548                     ECORE_CPU_TO_LE16(p->tunnel_mask);
3549                 data->udp_4tuple_dst_port_value =
3550                     ECORE_CPU_TO_LE16(p->tunnel_value);
3551         }
3552
3553         /* Hashing mask */
3554         data->rss_result_mask = p->rss_result_mask;
3555
3556         /* RSS engine ID */
3557         data->rss_engine_id = o->engine_id;
3558
3559         ECORE_MSG("rss_engine_id=%d", data->rss_engine_id);
3560
3561         /* Indirection table */
3562         ECORE_MEMCPY(data->indirection_table, p->ind_table,
3563                      T_ETH_INDIRECTION_TABLE_SIZE);
3564
3565         /* Remember the last configuration */
3566         ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3567
3568         /* RSS keys */
3569         if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3570                 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3571                              sizeof(data->rss_key));
3572                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3573         }
3574
3575         /* No need for an explicit memory barrier here as long as we would
3576          * need to ensure the ordering of writing to the SPQ element
3577          * and updating of the SPQ producer which involves a memory
3578          * read and we will have to put a full memory barrier there
3579          * (inside ecore_sp_post()).
3580          */
3581
3582         /* Send a ramrod */
3583         rc = ecore_sp_post(sc,
3584                            RAMROD_CMD_ID_ETH_RSS_UPDATE,
3585                            r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3586
3587         if (rc < 0)
3588                 return rc;
3589
3590         return ECORE_PENDING;
3591 }
3592
3593 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3594 {
3595         int rc;
3596         struct ecore_rss_config_obj *o = p->rss_obj;
3597         struct ecore_raw_obj *r = &o->raw;
3598
3599         /* Do nothing if only driver cleanup was requested */
3600         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3601                 return ECORE_SUCCESS;
3602
3603         r->set_pending(r);
3604
3605         rc = o->config_rss(sc, p);
3606         if (rc < 0) {
3607                 r->clear_pending(r);
3608                 return rc;
3609         }
3610
3611         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3612                 rc = r->wait_comp(sc, r);
3613
3614         return rc;
3615 }
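
/* Illustrative sketch (examples only, compiled out): a minimal caller of
 * ecore_config_rss(). The rss object is assumed to have been set up via
 * ecore_init_rss_config_obj(); the mask value is arbitrary and the guard
 * macro is hypothetical.
 */
#ifdef ECORE_SP_EXAMPLES
static int ecore_example_config_rss(struct bnx2x_softc *sc,
                                    struct ecore_rss_config_obj *rss_obj)
{
        struct ecore_config_rss_params params = { 0 };

        params.rss_obj = rss_obj;
        params.rss_result_mask = 0x7f; /* hypothetical 7-bit hash mask */
        ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
        ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
        ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

        /* Returns ECORE_SUCCESS, ECORE_PENDING or a negative error */
        return ecore_config_rss(sc, &params);
}
#endif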
3616
3617 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3618                                uint8_t cl_id, uint32_t cid, uint8_t func_id,
3619                                uint8_t engine_id, void *rdata,
3620                                ecore_dma_addr_t rdata_mapping, int state,
3621                                unsigned long *pstate, ecore_obj_type type)
3622 {
3623         ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3624                            rdata_mapping, state, pstate, type);
3625
3626         rss_obj->engine_id = engine_id;
3627         rss_obj->config_rss = ecore_setup_rss;
3628 }
3629
3630 /********************** Queue state object ***********************************/
3631
3632 /**
3633  * ecore_queue_state_change - perform Queue state change transition
3634  *
3635  * @sc:         device handle
3636  * @params:     parameters to perform the transition
3637  *
3638  * returns 0 in case of successfully completed transition, negative error
3639  * code in case of failure, positive (EBUSY) value if there is a completion
3640  * that is still pending (possible only if RAMROD_COMP_WAIT is
3641  * not set in params->ramrod_flags for asynchronous commands).
3642  *
3643  */
3644 int ecore_queue_state_change(struct bnx2x_softc *sc,
3645                              struct ecore_queue_state_params *params)
3646 {
3647         struct ecore_queue_sp_obj *o = params->q_obj;
3648         int rc, pending_bit;
3649         unsigned long *pending = &o->pending;
3650
3651         /* Check that the requested transition is legal */
3652         rc = o->check_transition(sc, o, params);
3653         if (rc) {
3654                 PMD_DRV_LOG(ERR, "check transition returned an error. rc %d",
3655                             rc);
3656                 return ECORE_INVAL;
3657         }
3658
3659         /* Set "pending" bit */
3660         ECORE_MSG("pending bit was=%lx", o->pending);
3661         pending_bit = o->set_pending(o, params);
3662         ECORE_MSG("pending bit now=%lx", o->pending);
3663
3664         /* Don't send a command if only driver cleanup was requested */
3665         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
3666                 o->complete_cmd(sc, o, pending_bit);
3667         else {
3668                 /* Send a ramrod */
3669                 rc = o->send_cmd(sc, params);
3670                 if (rc) {
3671                         o->next_state = ECORE_Q_STATE_MAX;
3672                         ECORE_CLEAR_BIT(pending_bit, pending);
3673                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3674                         return rc;
3675                 }
3676
3677                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
3678                         rc = o->wait_comp(sc, o, pending_bit);
3679                         if (rc)
3680                                 return rc;
3681
3682                         return ECORE_SUCCESS;
3683                 }
3684         }
3685
3686         return ECORE_RET_PENDING(pending_bit, pending);
3687 }
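
/* Illustrative sketch (examples only, compiled out): activating a queue
 * through the state machine above. ACTIVATE rides on the UPDATE command,
 * so the matching update flags are set. The `update` union member name is
 * assumed to follow the params.params.setup/tx_only pattern used below;
 * the guard macro is hypothetical.
 */
#ifdef ECORE_SP_EXAMPLES
static int ecore_example_q_activate(struct bnx2x_softc *sc,
                                    struct ecore_queue_sp_obj *q_obj)
{
        struct ecore_queue_state_params params = { 0 };

        params.q_obj = q_obj;
        params.cmd = ECORE_Q_CMD_ACTIVATE;
        ECORE_SET_BIT(ECORE_Q_UPDATE_ACTIVATE,
                      &params.params.update.update_flags);
        ECORE_SET_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
                      &params.params.update.update_flags);
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

        return ecore_queue_state_change(sc, &params);
}
#endif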
3688
3689 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3690                                    struct ecore_queue_state_params *params)
3691 {
3692         enum ecore_queue_cmd cmd = params->cmd, bit;
3693
3694         /* ACTIVATE and DEACTIVATE commands are implemented on top of
3695          * UPDATE command.
3696          */
3697         if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3698                 bit = ECORE_Q_CMD_UPDATE;
3699         else
3700                 bit = cmd;
3701
3702         ECORE_SET_BIT(bit, &obj->pending);
3703         return bit;
3704 }
3705
3706 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3707                                  struct ecore_queue_sp_obj *o,
3708                                  enum ecore_queue_cmd cmd)
3709 {
3710         return ecore_state_wait(sc, cmd, &o->pending);
3711 }
3712
3713 /**
3714  * ecore_queue_comp_cmd - complete the state change command.
3715  *
3716  * @sc:         device handle
3717  * @o:          queue state object
3718  * @cmd:        command which has completed
3719  *
3720  * Checks that the arrived completion is expected.
3721  */
3722 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3723                                 struct ecore_queue_sp_obj *o,
3724                                 enum ecore_queue_cmd cmd)
3725 {
3726         unsigned long cur_pending = o->pending;
3727
3728         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3729                 PMD_DRV_LOG(ERR,
3730                             "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3731                             cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3732                             cur_pending, o->next_state);
3733                 return ECORE_INVAL;
3734         }
3735
3736         if (o->next_tx_only >= o->max_cos)
3737                 /* >= because tx only must always be smaller than cos since the
3738                  * primary connection supports COS 0
3739                  */
3740                 PMD_DRV_LOG(ERR,
3741                             "illegal value for next tx_only: %d. max cos was %d",
3742                             o->next_tx_only, o->max_cos);
3743
3744         ECORE_MSG
3745             ("Completing command %d for queue %d, setting state to %d",
3746              cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3747
3748         if (o->next_tx_only)    /* print num tx-only if any exist */
3749                 ECORE_MSG("primary cid %d: num tx-only cons %d",
3750                           o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3751
3752         o->state = o->next_state;
3753         o->num_tx_only = o->next_tx_only;
3754         o->next_state = ECORE_Q_STATE_MAX;
3755
3756         /* It's important that o->state and o->next_state are
3757          * updated before o->pending.
3758          */
3759         wmb();
3760
3761         ECORE_CLEAR_BIT(cmd, &o->pending);
3762         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3763
3764         return ECORE_SUCCESS;
3765 }
3766
3767 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3768                                        *cmd_params,
3769                                        struct client_init_ramrod_data *data)
3770 {
3771         struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3772
3773         /* Rx data */
3774
3775         /* IPv6 TPA supported for E2 and above only */
3776         data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3777                                           &params->flags) *
3778             CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3779 }
3780
3781 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3782                                            struct ecore_queue_sp_obj *o,
3783                                            struct ecore_general_setup_params
3784                                            *params, struct client_init_general_data
3785                                            *gen_data, unsigned long *flags)
3786 {
3787         gen_data->client_id = o->cl_id;
3788
3789         if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3790                 gen_data->statistics_counter_id = params->stat_id;
3791                 gen_data->statistics_en_flg = 1;
3792                 gen_data->statistics_zero_flg =
3793                     ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3794         } else
3795                 gen_data->statistics_counter_id =
3796                     DISABLE_STATISTIC_COUNTER_ID_VALUE;
3797
3798         gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3799         gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3800         gen_data->sp_client_id = params->spcl_id;
3801         gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3802         gen_data->func_id = o->func_id;
3803
3804         gen_data->cos = params->cos;
3805
3806         gen_data->traffic_type =
3807             ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3808             LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3809
3810         ECORE_MSG("flags: active %d, cos %d, stats en %d",
3811                   gen_data->activate_flg, gen_data->cos,
3812                   gen_data->statistics_en_flg);
3813 }
3814
3815 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3816                                       struct client_init_tx_data *tx_data,
3817                                       unsigned long *flags)
3818 {
3819         tx_data->enforce_security_flg =
3820             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3821         tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3822         tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3823         tx_data->tx_switching_flg =
3824             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3825         tx_data->anti_spoofing_flg =
3826             ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3827         tx_data->force_default_pri_flg =
3828             ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3829         tx_data->refuse_outband_vlan_flg =
3830             ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3831         tx_data->tunnel_non_lso_pcsum_location =
3832             ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3833             CSUM_ON_BD;
3834
3835         tx_data->tx_status_block_id = params->fw_sb_id;
3836         tx_data->tx_sb_index_number = params->sb_cq_index;
3837         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3838
3839         tx_data->tx_bd_page_base.lo =
3840             ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3841         tx_data->tx_bd_page_base.hi =
3842             ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3843
3844         /* Don't configure any Tx switching mode during queue SETUP */
3845         tx_data->state = 0;
3846 }
3847
3848 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3849                                          struct client_init_rx_data *rx_data)
3850 {
3851         /* flow control data */
3852         rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3853         rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3854         rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3855         rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3856         rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3857         rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3858         rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3859 }
3860
3861 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3862                                       struct client_init_rx_data *rx_data,
3863                                       unsigned long *flags)
3864 {
3865         rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3866             CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3867         rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3868             CLIENT_INIT_RX_DATA_TPA_MODE;
3869         rx_data->vmqueue_mode_en_flg = 0;
3870
3871         rx_data->extra_data_over_sgl_en_flg =
3872             ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3873         rx_data->cache_line_alignment_log_size = params->cache_line_log;
3874         rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3875         rx_data->client_qzone_id = params->cl_qzone_id;
3876         rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3877
3878         /* Always start in DROP_ALL mode */
3879         rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3880                                            CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3881
3882         /* We don't set drop flags */
3883         rx_data->drop_ip_cs_err_flg = 0;
3884         rx_data->drop_tcp_cs_err_flg = 0;
3885         rx_data->drop_ttl0_flg = 0;
3886         rx_data->drop_udp_cs_err_flg = 0;
3887         rx_data->inner_vlan_removal_enable_flg =
3888             ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3889         rx_data->outer_vlan_removal_enable_flg =
3890             ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3891         rx_data->status_block_id = params->fw_sb_id;
3892         rx_data->rx_sb_index_number = params->sb_cq_index;
3893         rx_data->max_tpa_queues = params->max_tpa_queues;
3894         rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3895         rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3896         rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3897         rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3898         rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3899         rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3900                                                  flags);
3901
3902         if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3903                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3904                 rx_data->is_approx_mcast = 1;
3905         }
3906
3907         rx_data->rss_engine_id = params->rss_engine_id;
3908
3909         /* silent vlan removal */
3910         rx_data->silent_vlan_removal_flg =
3911             ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3912         rx_data->silent_vlan_value =
3913             ECORE_CPU_TO_LE16(params->silent_removal_value);
3914         rx_data->silent_vlan_mask =
3915             ECORE_CPU_TO_LE16(params->silent_removal_mask);
3916 }
3917
3918 /* initialize the general, tx and rx parts of a queue object */
3919 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
3920                                         *cmd_params,
3921                                         struct client_init_ramrod_data *data)
3922 {
3923         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3924                                        &cmd_params->params.setup.gen_params,
3925                                        &data->general,
3926                                        &cmd_params->params.setup.flags);
3927
3928         ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3929                                   &data->tx, &cmd_params->params.setup.flags);
3930
3931         ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3932                                   &data->rx, &cmd_params->params.setup.flags);
3933
3934         ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3935                                      &data->rx);
3936 }
3937
3938 /* initialize the general and tx parts of a tx-only queue object */
3939 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
3940                                        *cmd_params,
3941                                        struct tx_queue_init_ramrod_data *data)
3942 {
3943         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3944                                        &cmd_params->params.tx_only.gen_params,
3945                                        &data->general,
3946                                        &cmd_params->params.tx_only.flags);
3947
3948         ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3949                                   &data->tx, &cmd_params->params.tx_only.flags);
3950
3951         ECORE_MSG("cid %d, tx bd page lo %x hi %x",
3952                   cmd_params->q_obj->cids[0],
3953                   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3954 }
3955
3956 /**
3957  * ecore_q_init - init HW/FW queue
3958  *
3959  * @sc:         device handle
3960  * @params:     queue initialization parameters
3961  *
3962  * HW/FW initial Queue configuration:
3963  *      - HC: Rx and Tx
3964  *      - CDU context validation
3965  *
3966  */
3967 static int ecore_q_init(struct bnx2x_softc *sc,
3968                         struct ecore_queue_state_params *params)
3969 {
3970         struct ecore_queue_sp_obj *o = params->q_obj;
3971         struct ecore_queue_init_params *init = &params->params.init;
3972         uint16_t hc_usec;
3973         uint8_t cos;
3974
3975         /* Tx HC configuration */
3976         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3977             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3978                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3979
3980                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3981                                                init->tx.sb_cq_index,
3982                                                !ECORE_TEST_BIT
3983                                                (ECORE_Q_FLG_HC_EN,
3984                                                 &init->tx.flags), hc_usec);
3985         }
3986
3987         /* Rx HC configuration */
3988         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3989             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3990                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3991
3992                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3993                                                init->rx.sb_cq_index,
3994                                                !ECORE_TEST_BIT
3995                                                (ECORE_Q_FLG_HC_EN,
3996                                                 &init->rx.flags), hc_usec);
3997         }
3998
3999         /* Set CDU context validation values */
4000         for (cos = 0; cos < o->max_cos; cos++) {
4001                 ECORE_MSG("setting context validation. cid %d, cos %d",
4002                           o->cids[cos], cos);
4003                 ECORE_MSG("context pointer %p", init->cxts[cos]);
4004                 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
4005         }
4006
4007         /* As no ramrod is sent, complete the command immediately  */
4008         o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
4009
4010         ECORE_MMIOWB();
4011         ECORE_SMP_MB();
4012
4013         return ECORE_SUCCESS;
4014 }
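
/* Illustrative sketch (examples only, compiled out): hc_rate above is an
 * interrupt rate in ints/sec; the coalescing value programmed into the
 * status block is its period in microseconds. The guard macro is
 * hypothetical.
 */
#ifdef ECORE_SP_EXAMPLES
static uint16_t ecore_example_hc_usec(uint16_t hc_rate)
{
        /* e.g. 5000 ints/sec -> 200 usec between interrupts */
        return hc_rate ? 1000000 / hc_rate : 0;
}
#endif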
4015
4016 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
4017                                   *params)
4018 {
4019         struct ecore_queue_sp_obj *o = params->q_obj;
4020         struct client_init_ramrod_data *rdata =
4021             (struct client_init_ramrod_data *)o->rdata;
4022         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4023         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4024
4025         /* Clear the ramrod data */
4026         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4027
4028         /* Fill the ramrod data */
4029         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4030
4031         /* No need for an explicit memory barrier here as long as we would
4032          * need to ensure the ordering of writing to the SPQ element
4033          * and updating of the SPQ producer which involves a memory
4034          * read and we will have to put a full memory barrier there
4035          * (inside ecore_sp_post()).
4036          */
4037
4038         return ecore_sp_post(sc,
4039                              ramrod,
4040                              o->cids[ECORE_PRIMARY_CID_INDEX],
4041                              data_mapping, ETH_CONNECTION_TYPE);
4042 }
4043
4044 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4045                                  struct ecore_queue_state_params *params)
4046 {
4047         struct ecore_queue_sp_obj *o = params->q_obj;
4048         struct client_init_ramrod_data *rdata =
4049             (struct client_init_ramrod_data *)o->rdata;
4050         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4051         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4052
4053         /* Clear the ramrod data */
4054         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4055
4056         /* Fill the ramrod data */
4057         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4058         ecore_q_fill_setup_data_e2(params, rdata);
4059
4060         /* No need for an explicit memory barrier here as long as we would
4061          * need to ensure the ordering of writing to the SPQ element
4062          * and updating of the SPQ producer which involves a memory
4063          * read and we will have to put a full memory barrier there
4064          * (inside ecore_sp_post()).
4065          */
4066
4067         return ecore_sp_post(sc,
4068                              ramrod,
4069                              o->cids[ECORE_PRIMARY_CID_INDEX],
4070                              data_mapping, ETH_CONNECTION_TYPE);
4071 }
4072
4073 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
4074                                       *params)
4075 {
4076         struct ecore_queue_sp_obj *o = params->q_obj;
4077         struct tx_queue_init_ramrod_data *rdata =
4078             (struct tx_queue_init_ramrod_data *)o->rdata;
4079         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4080         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4081         struct ecore_queue_setup_tx_only_params *tx_only_params =
4082             &params->params.tx_only;
4083         uint8_t cid_index = tx_only_params->cid_index;
4084
4085         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4086                 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4087                 ECORE_MSG("sending forward tx-only ramrod");
4088         }
4089         if (cid_index >= o->max_cos) {
4090                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4091                             o->cl_id, cid_index);
4092                 return ECORE_INVAL;
4093         }
4094
4095         ECORE_MSG("parameters received: cos: %d sp-id: %d",
4096                   tx_only_params->gen_params.cos,
4097                   tx_only_params->gen_params.spcl_id);
4098
4099         /* Clear the ramrod data */
4100         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4101
4102         /* Fill the ramrod data */
4103         ecore_q_fill_setup_tx_only(sc, params, rdata);
4104
4105         ECORE_MSG
4106             ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4107              o->cids[cid_index], rdata->general.client_id,
4108              rdata->general.sp_client_id, rdata->general.cos);
4109
        /* No need for an explicit memory barrier here: ecore_sp_post()
         * issues a full memory barrier before updating the SPQ producer,
         * which guarantees that the ramrod data written above is ordered
         * before the producer update that hands it to the chip.
         */
4116
4117         return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4118                              data_mapping, ETH_CONNECTION_TYPE);
4119 }
4120
4121 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4122                                      struct ecore_queue_update_params *params,
4123                                      struct client_update_ramrod_data *data)
4124 {
4125         /* Client ID of the client to update */
4126         data->client_id = obj->cl_id;
4127
4128         /* Function ID of the client to update */
4129         data->func_id = obj->func_id;
4130
4131         /* Default VLAN value */
4132         data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4133
4134         /* Inner VLAN stripping */
4135         data->inner_vlan_removal_enable_flg =
4136             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4137         data->inner_vlan_removal_change_flg =
4138             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4139                            &params->update_flags);
4140
4141         /* Outer VLAN stripping */
4142         data->outer_vlan_removal_enable_flg =
4143             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4144         data->outer_vlan_removal_change_flg =
4145             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4146                            &params->update_flags);
4147
        /* Drop packets whose source MAC does not belong to this
         * Queue.
         */
4151         data->anti_spoofing_enable_flg =
4152             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4153         data->anti_spoofing_change_flg =
4154             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4155                            &params->update_flags);
4156
4157         /* Activate/Deactivate */
4158         data->activate_flg =
4159             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
4160         data->activate_change_flg =
4161             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4162
4163         /* Enable default VLAN */
4164         data->default_vlan_enable_flg =
4165             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4166         data->default_vlan_change_flg =
4167             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4168                            &params->update_flags);
4169
4170         /* silent vlan removal */
4171         data->silent_vlan_change_flg =
4172             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4173                            &params->update_flags);
4174         data->silent_vlan_removal_flg =
4175             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4176                            &params->update_flags);
4177         data->silent_vlan_value =
4178             ECORE_CPU_TO_LE16(params->silent_removal_value);
4179         data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4180
4181         /* tx switching */
4182         data->tx_switching_flg =
4183             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4184         data->tx_switching_change_flg =
4185             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4186                            &params->update_flags);
4187 }
4188
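/* Each feature filled in above is described by a value flag paired with a
 * `change' flag: firmware only applies the value when the corresponding
 * *_change_flg is set, so unrelated settings survive an UPDATE. For
 * instance, a caller that only wants to enable inner VLAN stripping would
 * set both bits (a sketch, mirroring what ecore_q_send_activate() below
 * does for the ACTIVATE bits):
 *
 *      ECORE_SET_BIT_NA(ECORE_Q_UPDATE_IN_VLAN_REM,
 *                       &update->update_flags);
 *      ECORE_SET_BIT_NA(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
 *                       &update->update_flags);
 */
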
4189 static int ecore_q_send_update(struct bnx2x_softc *sc,
4190                                struct ecore_queue_state_params *params)
4191 {
4192         struct ecore_queue_sp_obj *o = params->q_obj;
4193         struct client_update_ramrod_data *rdata =
4194             (struct client_update_ramrod_data *)o->rdata;
4195         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4196         struct ecore_queue_update_params *update_params =
4197             &params->params.update;
4198         uint8_t cid_index = update_params->cid_index;
4199
4200         if (cid_index >= o->max_cos) {
4201                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4202                             o->cl_id, cid_index);
4203                 return ECORE_INVAL;
4204         }
4205
4206         /* Clear the ramrod data */
4207         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4208
4209         /* Fill the ramrod data */
4210         ecore_q_fill_update_data(o, update_params, rdata);
4211
        /* No need for an explicit memory barrier here: ecore_sp_post()
         * issues a full memory barrier before updating the SPQ producer,
         * which guarantees that the ramrod data written above is ordered
         * before the producer update that hands it to the chip.
         */
4218
4219         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4220                              o->cids[cid_index], data_mapping,
4221                              ETH_CONNECTION_TYPE);
4222 }
4223
4224 /**
4225  * ecore_q_send_deactivate - send DEACTIVATE command
4226  *
4227  * @sc:         device handle
 * @params:     queue state parameters
 *
 * Implemented using the UPDATE command.
4231  */
4232 static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4233                                    *params)
4234 {
4235         struct ecore_queue_update_params *update = &params->params.update;
4236
4237         ECORE_MEMSET(update, 0, sizeof(*update));
4238
4239         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4240
4241         return ecore_q_send_update(sc, params);
4242 }
4243
4244 /**
4245  * ecore_q_send_activate - send ACTIVATE command
4246  *
4247  * @sc:         device handle
 * @params:     queue state parameters
 *
 * Implemented using the UPDATE command.
4251  */
4252 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4253                                  struct ecore_queue_state_params *params)
4254 {
4255         struct ecore_queue_update_params *update = &params->params.update;
4256
4257         ECORE_MEMSET(update, 0, sizeof(*update));
4258
4259         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4260         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4261
4262         return ecore_q_send_update(sc, params);
4263 }
4264
4265 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4266                                    __rte_unused struct
4267                                    ecore_queue_state_params *params)
4268 {
4269         /* Not implemented yet. */
4270         return -1;
4271 }
4272
4273 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4274                              struct ecore_queue_state_params *params)
4275 {
4276         struct ecore_queue_sp_obj *o = params->q_obj;
4277
        /* Build eth_halt_ramrod_data.client_id in a big-endian friendly way */
        ecore_dma_addr_t data_mapping = (ecore_dma_addr_t) o->cl_id;
4281
4282         return ecore_sp_post(sc,
4283                              RAMROD_CMD_ID_ETH_HALT,
4284                              o->cids[ECORE_PRIMARY_CID_INDEX],
4285                              data_mapping, ETH_CONNECTION_TYPE);
4286 }
4287
4288 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4289                                 struct ecore_queue_state_params *params)
4290 {
4291         struct ecore_queue_sp_obj *o = params->q_obj;
4292         uint8_t cid_idx = params->params.cfc_del.cid_index;
4293
4294         if (cid_idx >= o->max_cos) {
4295                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4296                             o->cl_id, cid_idx);
4297                 return ECORE_INVAL;
4298         }
4299
4300         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4301                              o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4302 }
4303
4304 static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4305                                   *params)
4306 {
4307         struct ecore_queue_sp_obj *o = params->q_obj;
4308         uint8_t cid_index = params->params.terminate.cid_index;
4309
4310         if (cid_index >= o->max_cos) {
4311                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4312                             o->cl_id, cid_index);
4313                 return ECORE_INVAL;
4314         }
4315
4316         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4317                              o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4318 }
4319
4320 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4321                               struct ecore_queue_state_params *params)
4322 {
4323         struct ecore_queue_sp_obj *o = params->q_obj;
4324
4325         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4326                              o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4327                              ETH_CONNECTION_TYPE);
4328 }
4329
4330 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
4331                                     *params)
4332 {
4333         switch (params->cmd) {
4334         case ECORE_Q_CMD_INIT:
4335                 return ecore_q_init(sc, params);
4336         case ECORE_Q_CMD_SETUP_TX_ONLY:
4337                 return ecore_q_send_setup_tx_only(sc, params);
4338         case ECORE_Q_CMD_DEACTIVATE:
4339                 return ecore_q_send_deactivate(sc, params);
4340         case ECORE_Q_CMD_ACTIVATE:
4341                 return ecore_q_send_activate(sc, params);
4342         case ECORE_Q_CMD_UPDATE:
4343                 return ecore_q_send_update(sc, params);
4344         case ECORE_Q_CMD_UPDATE_TPA:
4345                 return ecore_q_send_update_tpa(sc, params);
4346         case ECORE_Q_CMD_HALT:
4347                 return ecore_q_send_halt(sc, params);
4348         case ECORE_Q_CMD_CFC_DEL:
4349                 return ecore_q_send_cfc_del(sc, params);
4350         case ECORE_Q_CMD_TERMINATE:
4351                 return ecore_q_send_terminate(sc, params);
4352         case ECORE_Q_CMD_EMPTY:
4353                 return ecore_q_send_empty(sc, params);
4354         default:
4355                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4356                 return ECORE_INVAL;
4357         }
4358 }
4359
4360 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4361                                     struct ecore_queue_state_params *params)
4362 {
4363         switch (params->cmd) {
4364         case ECORE_Q_CMD_SETUP:
4365                 return ecore_q_send_setup_e1x(sc, params);
4366         case ECORE_Q_CMD_INIT:
4367         case ECORE_Q_CMD_SETUP_TX_ONLY:
4368         case ECORE_Q_CMD_DEACTIVATE:
4369         case ECORE_Q_CMD_ACTIVATE:
4370         case ECORE_Q_CMD_UPDATE:
4371         case ECORE_Q_CMD_UPDATE_TPA:
4372         case ECORE_Q_CMD_HALT:
4373         case ECORE_Q_CMD_CFC_DEL:
4374         case ECORE_Q_CMD_TERMINATE:
4375         case ECORE_Q_CMD_EMPTY:
4376                 return ecore_queue_send_cmd_cmn(sc, params);
4377         default:
4378                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4379                 return ECORE_INVAL;
4380         }
4381 }
4382
4383 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4384                                    struct ecore_queue_state_params *params)
4385 {
4386         switch (params->cmd) {
4387         case ECORE_Q_CMD_SETUP:
4388                 return ecore_q_send_setup_e2(sc, params);
4389         case ECORE_Q_CMD_INIT:
4390         case ECORE_Q_CMD_SETUP_TX_ONLY:
4391         case ECORE_Q_CMD_DEACTIVATE:
4392         case ECORE_Q_CMD_ACTIVATE:
4393         case ECORE_Q_CMD_UPDATE:
4394         case ECORE_Q_CMD_UPDATE_TPA:
4395         case ECORE_Q_CMD_HALT:
4396         case ECORE_Q_CMD_CFC_DEL:
4397         case ECORE_Q_CMD_TERMINATE:
4398         case ECORE_Q_CMD_EMPTY:
4399                 return ecore_queue_send_cmd_cmn(sc, params);
4400         default:
4401                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4402                 return ECORE_INVAL;
4403         }
4404 }
4405
4406 /**
 * ecore_queue_chk_transition - check state machine of a regular
 * (not Forwarding) Queue
 *
 * @sc:         device handle
 * @o:          queue state object
 * @params:     queue state parameters
 *
 * Checks whether the requested command is legal in the current
 * state and, if it is, sets `next_state' in the object; the
 * completion flow later uses it to set the `state' of the object.
4418  *
4419  * returns 0 if a requested command is a legal transition,
4420  *         ECORE_INVAL otherwise.
4421  */
4422 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4423                                       struct ecore_queue_sp_obj *o,
4424                                       struct ecore_queue_state_params *params)
4425 {
4426         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4427         enum ecore_queue_cmd cmd = params->cmd;
4428         struct ecore_queue_update_params *update_params =
4429             &params->params.update;
4430         uint8_t next_tx_only = o->num_tx_only;
4431
        /* Forget all commands pending completion if a driver-only state
         * transition has been requested.
         */
4435         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4436                 o->pending = 0;
4437                 o->next_state = ECORE_Q_STATE_MAX;
4438         }
4439
4440         /* Don't allow a next state transition if we are in the middle of
4441          * the previous one.
4442          */
4443         if (o->pending) {
4444                 PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx",
4445                             o->pending);
4446                 return ECORE_BUSY;
4447         }
4448
4449         switch (state) {
4450         case ECORE_Q_STATE_RESET:
4451                 if (cmd == ECORE_Q_CMD_INIT)
4452                         next_state = ECORE_Q_STATE_INITIALIZED;
4453
4454                 break;
4455         case ECORE_Q_STATE_INITIALIZED:
4456                 if (cmd == ECORE_Q_CMD_SETUP) {
4457                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4458                                            &params->params.setup.flags))
4459                                 next_state = ECORE_Q_STATE_ACTIVE;
4460                         else
4461                                 next_state = ECORE_Q_STATE_INACTIVE;
4462                 }
4463
4464                 break;
4465         case ECORE_Q_STATE_ACTIVE:
4466                 if (cmd == ECORE_Q_CMD_DEACTIVATE)
4467                         next_state = ECORE_Q_STATE_INACTIVE;
4468
4469                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4470                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4471                         next_state = ECORE_Q_STATE_ACTIVE;
4472
4473                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4474                         next_state = ECORE_Q_STATE_MULTI_COS;
4475                         next_tx_only = 1;
4476                 }
4477
4478                 else if (cmd == ECORE_Q_CMD_HALT)
4479                         next_state = ECORE_Q_STATE_STOPPED;
4480
4481                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4482                         /* If "active" state change is requested, update the
4483                          *  state accordingly.
4484                          */
4485                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4486                                            &update_params->update_flags) &&
4487                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4488                                             &update_params->update_flags))
4489                                 next_state = ECORE_Q_STATE_INACTIVE;
4490                         else
4491                                 next_state = ECORE_Q_STATE_ACTIVE;
4492                 }
4493
4494                 break;
4495         case ECORE_Q_STATE_MULTI_COS:
4496                 if (cmd == ECORE_Q_CMD_TERMINATE)
4497                         next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4498
4499                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4500                         next_state = ECORE_Q_STATE_MULTI_COS;
4501                         next_tx_only = o->num_tx_only + 1;
4502                 }
4503
4504                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4505                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4506                         next_state = ECORE_Q_STATE_MULTI_COS;
4507
4508                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4509                         /* If "active" state change is requested, update the
4510                          *  state accordingly.
4511                          */
4512                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4513                                            &update_params->update_flags) &&
4514                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4515                                             &update_params->update_flags))
4516                                 next_state = ECORE_Q_STATE_INACTIVE;
4517                         else
4518                                 next_state = ECORE_Q_STATE_MULTI_COS;
4519                 }
4520
4521                 break;
4522         case ECORE_Q_STATE_MCOS_TERMINATED:
4523                 if (cmd == ECORE_Q_CMD_CFC_DEL) {
4524                         next_tx_only = o->num_tx_only - 1;
4525                         if (next_tx_only == 0)
4526                                 next_state = ECORE_Q_STATE_ACTIVE;
4527                         else
4528                                 next_state = ECORE_Q_STATE_MULTI_COS;
4529                 }
4530
4531                 break;
4532         case ECORE_Q_STATE_INACTIVE:
4533                 if (cmd == ECORE_Q_CMD_ACTIVATE)
4534                         next_state = ECORE_Q_STATE_ACTIVE;
4535
4536                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4537                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4538                         next_state = ECORE_Q_STATE_INACTIVE;
4539
4540                 else if (cmd == ECORE_Q_CMD_HALT)
4541                         next_state = ECORE_Q_STATE_STOPPED;
4542
4543                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4544                         /* If "active" state change is requested, update the
4545                          * state accordingly.
4546                          */
4547                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4548                                            &update_params->update_flags) &&
4549                             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4550                                            &update_params->update_flags)) {
4551                                 if (o->num_tx_only == 0)
4552                                         next_state = ECORE_Q_STATE_ACTIVE;
4553                                 else    /* tx only queues exist for this queue */
4554                                         next_state = ECORE_Q_STATE_MULTI_COS;
4555                         } else
4556                                 next_state = ECORE_Q_STATE_INACTIVE;
4557                 }
4558
4559                 break;
4560         case ECORE_Q_STATE_STOPPED:
4561                 if (cmd == ECORE_Q_CMD_TERMINATE)
4562                         next_state = ECORE_Q_STATE_TERMINATED;
4563
4564                 break;
4565         case ECORE_Q_STATE_TERMINATED:
4566                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4567                         next_state = ECORE_Q_STATE_RESET;
4568
4569                 break;
4570         default:
4571                 PMD_DRV_LOG(ERR, "Illegal state: %d", state);
4572         }
4573
4574         /* Transition is assured */
4575         if (next_state != ECORE_Q_STATE_MAX) {
4576                 ECORE_MSG("Good state transition: %d(%d)->%d",
4577                           state, cmd, next_state);
4578                 o->next_state = next_state;
4579                 o->next_tx_only = next_tx_only;
4580                 return ECORE_SUCCESS;
4581         }
4582
4583         ECORE_MSG("Bad state transition request: %d %d", state, cmd);
4584
4585         return ECORE_INVAL;
4586 }
4587
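/* For reference, the transitions accepted above form this state diagram
 * (derived from the switch statement; EMPTY, UPDATE_TPA and
 * state-preserving UPDATEs loop back to the current state):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE --DEACTIVATE--> INACTIVE;  INACTIVE --ACTIVATE--> ACTIVE
 *   ACTIVE or INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 *   ACTIVE or MULTI_COS --SETUP_TX_ONLY--> MULTI_COS (num_tx_only++)
 *   MULTI_COS --TERMINATE--> MCOS_TERMINATED --CFC_DEL-->
 *       ACTIVE (last tx-only gone) or MULTI_COS (otherwise)
 */
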
4588 /**
4589  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4590  *
4591  * @sc:         device handle
 * @o:          queue state object
 * @params:     queue state parameters
 *
 * Checks whether the requested command is legal in the current
 * state and, if it is, sets `next_state' in the object; the
 * completion flow later uses it to set the `state' of the object.
4599  *
4600  * returns 0 if a requested command is a legal transition,
4601  *         ECORE_INVAL otherwise.
4602  */
4603 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4604                                           struct ecore_queue_sp_obj *o,
4605                                           struct ecore_queue_state_params
4606                                           *params)
4607 {
4608         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4609         enum ecore_queue_cmd cmd = params->cmd;
4610
4611         switch (state) {
4612         case ECORE_Q_STATE_RESET:
4613                 if (cmd == ECORE_Q_CMD_INIT)
4614                         next_state = ECORE_Q_STATE_INITIALIZED;
4615
4616                 break;
4617         case ECORE_Q_STATE_INITIALIZED:
4618                 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4619                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4620                                            &params->params.tx_only.flags))
4621                                 next_state = ECORE_Q_STATE_ACTIVE;
4622                         else
4623                                 next_state = ECORE_Q_STATE_INACTIVE;
4624                 }
4625
4626                 break;
4627         case ECORE_Q_STATE_ACTIVE:
4628         case ECORE_Q_STATE_INACTIVE:
4629                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4630                         next_state = ECORE_Q_STATE_RESET;
4631
4632                 break;
4633         default:
4634                 PMD_DRV_LOG(ERR, "Illegal state: %d", state);
4635         }
4636
4637         /* Transition is assured */
4638         if (next_state != ECORE_Q_STATE_MAX) {
4639                 ECORE_MSG("Good state transition: %d(%d)->%d",
4640                           state, cmd, next_state);
4641                 o->next_state = next_state;
4642                 return ECORE_SUCCESS;
4643         }
4644
4645         ECORE_MSG("Bad state transition request: %d %d", state, cmd);
4646         return ECORE_INVAL;
4647 }
4648
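/* The Forwarding queue state machine implemented above is much simpler:
 *
 *   RESET --INIT--> INITIALIZED --SETUP_TX_ONLY--> ACTIVE or INACTIVE
 *   ACTIVE or INACTIVE --CFC_DEL--> RESET
 */
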
4649 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4650                           struct ecore_queue_sp_obj *obj,
4651                           uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt,
4652                           uint8_t func_id, void *rdata,
4653                           ecore_dma_addr_t rdata_mapping, unsigned long type)
4654 {
4655         ECORE_MEMSET(obj, 0, sizeof(*obj));
4656
4657         /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
4658         ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4659
4660         rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4661         obj->max_cos = cid_cnt;
4662         obj->cl_id = cl_id;
4663         obj->func_id = func_id;
4664         obj->rdata = rdata;
4665         obj->rdata_mapping = rdata_mapping;
4666         obj->type = type;
4667         obj->next_state = ECORE_Q_STATE_MAX;
4668
4669         if (CHIP_IS_E1x(sc))
4670                 obj->send_cmd = ecore_queue_send_cmd_e1x;
4671         else
4672                 obj->send_cmd = ecore_queue_send_cmd_e2;
4673
4674         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4675                 obj->check_transition = ecore_queue_chk_fwd_transition;
4676         else
4677                 obj->check_transition = ecore_queue_chk_transition;
4678
4679         obj->complete_cmd = ecore_queue_comp_cmd;
4680         obj->wait_comp = ecore_queue_wait_comp;
4681         obj->set_pending = ecore_queue_set_pending;
4682 }
4683
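/* A minimal usage sketch (illustrative only: `fp', its fields and
 * `q_type_flags' are assumptions standing in for the caller's per-queue
 * context). A client initializes the object once and then drives it
 * through commands such as ECORE_Q_CMD_INIT via
 * ecore_queue_state_change():
 *
 *      struct ecore_queue_state_params q_params = { 0 };
 *
 *      ecore_init_queue_obj(sc, &fp->q_obj, fp->cl_id, fp->cids,
 *                           fp->max_cos, SC_FUNC(sc), fp->rdata,
 *                           fp->rdata_mapping, q_type_flags);
 *
 *      q_params.q_obj = &fp->q_obj;
 *      q_params.cmd = ECORE_Q_CMD_INIT;
 *      ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *      rc = ecore_queue_state_change(sc, &q_params);
 */
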
4684 /********************** Function state object *********************************/
4685 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4686                                            struct ecore_func_sp_obj *o)
4687 {
        /* In the middle of a transaction - return INVALID state */
4689         if (o->pending)
4690                 return ECORE_F_STATE_MAX;
4691
        /* Ensure the order of reading o->pending and o->state:
         * o->pending must be read first.
         */
4695         rmb();
4696
4697         return o->state;
4698 }
4699
4700 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4701                                 struct ecore_func_sp_obj *o,
4702                                 enum ecore_func_cmd cmd)
4703 {
4704         return ecore_state_wait(sc, cmd, &o->pending);
4705 }
4706
4707 /**
4708  * ecore_func_state_change_comp - complete the state machine transition
4709  *
4710  * @sc:         device handle
 * @o:          function state object
 * @cmd:        command that has completed
4713  *
4714  * Called on state change transition. Completes the state
4715  * machine transition only - no HW interaction.
4716  */
4717 static int
4718 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4719                              struct ecore_func_sp_obj *o,
4720                              enum ecore_func_cmd cmd)
4721 {
4722         unsigned long cur_pending = o->pending;
4723
4724         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4725                 PMD_DRV_LOG(ERR,
4726                             "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4727                             cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4728                             o->next_state);
4729                 return ECORE_INVAL;
4730         }
4731
        ECORE_MSG("Completing command %d for func %d, setting state to %d",
                  cmd, ECORE_FUNC_ID(sc), o->next_state);
4735
4736         o->state = o->next_state;
4737         o->next_state = ECORE_F_STATE_MAX;
4738
4739         /* It's important that o->state and o->next_state are
4740          * updated before o->pending.
4741          */
4742         wmb();
4743
4744         ECORE_CLEAR_BIT(cmd, &o->pending);
4745         ECORE_SMP_MB_AFTER_CLEAR_BIT();
4746
4747         return ECORE_SUCCESS;
4748 }
4749
4750 /**
4751  * ecore_func_comp_cmd - complete the state change command
4752  *
4753  * @sc:         device handle
 * @o:          function state object
 * @cmd:        completed command
4756  *
4757  * Checks that the arrived completion is expected.
4758  */
4759 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4760                                struct ecore_func_sp_obj *o,
4761                                enum ecore_func_cmd cmd)
4762 {
        /* Complete the state machine part first, checking that the
         * completion is legal.
         */
        return ecore_func_state_change_comp(sc, o, cmd);
4768 }
4769
4770 /**
4771  * ecore_func_chk_transition - perform function state machine transition
4772  *
4773  * @sc:         device handle
 * @o:          function state object
 * @params:     function state parameters
 *
 * Checks whether the requested command is legal in the current
 * state and, if it is, sets `next_state' in the object; the
 * completion flow later uses it to set the `state' of the object.
4781  *
4782  * returns 0 if a requested command is a legal transition,
4783  *         ECORE_INVAL otherwise.
4784  */
4785 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4786                                      struct ecore_func_sp_obj *o,
4787                                      struct ecore_func_state_params *params)
4788 {
4789         enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4790         enum ecore_func_cmd cmd = params->cmd;
4791
        /* Forget all commands pending completion if a driver-only state
         * transition has been requested.
         */
4795         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4796                 o->pending = 0;
4797                 o->next_state = ECORE_F_STATE_MAX;
4798         }
4799
4800         /* Don't allow a next state transition if we are in the middle of
4801          * the previous one.
4802          */
4803         if (o->pending)
4804                 return ECORE_BUSY;
4805
4806         switch (state) {
4807         case ECORE_F_STATE_RESET:
4808                 if (cmd == ECORE_F_CMD_HW_INIT)
4809                         next_state = ECORE_F_STATE_INITIALIZED;
4810
4811                 break;
4812         case ECORE_F_STATE_INITIALIZED:
4813                 if (cmd == ECORE_F_CMD_START)
4814                         next_state = ECORE_F_STATE_STARTED;
4815
4816                 else if (cmd == ECORE_F_CMD_HW_RESET)
4817                         next_state = ECORE_F_STATE_RESET;
4818
4819                 break;
4820         case ECORE_F_STATE_STARTED:
4821                 if (cmd == ECORE_F_CMD_STOP)
4822                         next_state = ECORE_F_STATE_INITIALIZED;
                /* AFEX ramrods can be sent only in STARTED mode and only
                 * while no FUNCTION_STOP ramrod completion is pending;
                 * for these events the next state remains STARTED.
                 */
4827                 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4828                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4829                         next_state = ECORE_F_STATE_STARTED;
4830
4831                 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4832                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4833                         next_state = ECORE_F_STATE_STARTED;
4834
4835                 /* Switch_update ramrod can be sent in either started or
4836                  * tx_stopped state, and it doesn't change the state.
4837                  */
4838                 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4839                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4840                         next_state = ECORE_F_STATE_STARTED;
4841
4842                 else if (cmd == ECORE_F_CMD_TX_STOP)
4843                         next_state = ECORE_F_STATE_TX_STOPPED;
4844
4845                 break;
4846         case ECORE_F_STATE_TX_STOPPED:
4847                 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4848                     (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4849                         next_state = ECORE_F_STATE_TX_STOPPED;
4850
4851                 else if (cmd == ECORE_F_CMD_TX_START)
4852                         next_state = ECORE_F_STATE_STARTED;
4853
4854                 break;
4855         default:
4856                 PMD_DRV_LOG(ERR, "Unknown state: %d", state);
4857         }
4858
4859         /* Transition is assured */
4860         if (next_state != ECORE_F_STATE_MAX) {
4861                 ECORE_MSG("Good function state transition: %d(%d)->%d",
4862                           state, cmd, next_state);
4863                 o->next_state = next_state;
4864                 return ECORE_SUCCESS;
4865         }
4866
4867         ECORE_MSG("Bad function state transition request: %d %d", state, cmd);
4868
4869         return ECORE_INVAL;
4870 }
4871
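/* Summary of the function-level transitions accepted above (AFEX and
 * SWITCH_UPDATE ramrods keep the current state and are only legal while
 * no FUNCTION_STOP completion is pending):
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *   INITIALIZED --HW_RESET--> RESET
 *   STARTED --STOP--> INITIALIZED;  STARTED --TX_STOP--> TX_STOPPED
 *   TX_STOPPED --TX_START--> STARTED
 */
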
4872 /**
4873  * ecore_func_init_func - performs HW init at function stage
4874  *
4875  * @sc:         device handle
 * @drv:        driver specific operations
4877  *
4878  * Init HW when the current phase is
4879  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4880  * HW blocks.
4881  */
4882 static int ecore_func_init_func(struct bnx2x_softc *sc,
4883                                 const struct ecore_func_sp_drv_ops *drv)
4884 {
4885         return drv->init_hw_func(sc);
4886 }
4887
4888 /**
4889  * ecore_func_init_port - performs HW init at port stage
4890  *
4891  * @sc:         device handle
 * @drv:        driver specific operations
4893  *
4894  * Init HW when the current phase is
4895  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4896  * FUNCTION-only HW blocks.
4897  *
4898  */
4899 static int ecore_func_init_port(struct bnx2x_softc *sc,
4900                                 const struct ecore_func_sp_drv_ops *drv)
4901 {
4902         int rc = drv->init_hw_port(sc);
4903         if (rc)
4904                 return rc;
4905
4906         return ecore_func_init_func(sc, drv);
4907 }
4908
4909 /**
4910  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4911  *
4912  * @sc:         device handle
 * @drv:        driver specific operations
4914  *
4915  * Init HW when the current phase is
4916  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4917  * PORT-only and FUNCTION-only HW blocks.
4918  */
4919 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4920                                     *drv)
4921 {
4922         int rc = drv->init_hw_cmn_chip(sc);
4923         if (rc)
4924                 return rc;
4925
4926         return ecore_func_init_port(sc, drv);
4927 }
4928
4929 /**
4930  * ecore_func_init_cmn - performs HW init at common stage
4931  *
4932  * @sc:         device handle
 * @drv:        driver specific operations
4934  *
4935  * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4937  * PORT-only and FUNCTION-only HW blocks.
4938  */
4939 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4940                                const struct ecore_func_sp_drv_ops *drv)
4941 {
4942         int rc = drv->init_hw_cmn(sc);
4943         if (rc)
4944                 return rc;
4945
4946         return ecore_func_init_port(sc, drv);
4947 }
4948
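/* Note how the init helpers above nest: the COMMON_CHIP and COMMON stages
 * both fall through to the PORT stage, and PORT falls through to the
 * FUNCTION stage, so each load phase initializes its own HW blocks plus
 * everything below it.
 */
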
4949 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4950                               struct ecore_func_state_params *params)
4951 {
4952         uint32_t load_code = params->params.hw_init.load_phase;
4953         struct ecore_func_sp_obj *o = params->f_obj;
4954         const struct ecore_func_sp_drv_ops *drv = o->drv;
4955         int rc = 0;
4956
4957         ECORE_MSG("function %d  load_code %x",
4958                   ECORE_ABS_FUNC_ID(sc), load_code);
4959
4960         /* Prepare FW */
4961         rc = drv->init_fw(sc);
4962         if (rc) {
4963                 PMD_DRV_LOG(ERR, "Error loading firmware");
4964                 goto init_err;
4965         }
4966
        /* Handle the beginning of COMMON_XXX phases separately... */
4968         switch (load_code) {
4969         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4970                 rc = ecore_func_init_cmn_chip(sc, drv);
4971                 if (rc)
4972                         goto init_err;
4973
4974                 break;
4975         case FW_MSG_CODE_DRV_LOAD_COMMON:
4976                 rc = ecore_func_init_cmn(sc, drv);
4977                 if (rc)
4978                         goto init_err;
4979
4980                 break;
4981         case FW_MSG_CODE_DRV_LOAD_PORT:
4982                 rc = ecore_func_init_port(sc, drv);
4983                 if (rc)
4984                         goto init_err;
4985
4986                 break;
4987         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4988                 rc = ecore_func_init_func(sc, drv);
4989                 if (rc)
4990                         goto init_err;
4991
4992                 break;
4993         default:
4994                 PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP",
4995                             load_code);
4996                 rc = ECORE_INVAL;
4997         }
4998
4999 init_err:
5000         /* In case of success, complete the command immediately: no ramrods
5001          * have been sent.
5002          */
5003         if (!rc)
5004                 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
5005
5006         return rc;
5007 }
5008
5009 /**
5010  * ecore_func_reset_func - reset HW at function stage
5011  *
5012  * @sc:         device handle
 * @drv:        driver specific operations
5014  *
5015  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5016  * FUNCTION-only HW blocks.
5017  */
5018 static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
5019                                   *drv)
5020 {
5021         drv->reset_hw_func(sc);
5022 }
5023
5024 /**
 * ecore_func_reset_port - reset HW at port stage
5026  *
5027  * @sc:         device handle
 * @drv:        driver specific operations
5029  *
5030  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5031  * FUNCTION-only and PORT-only HW blocks.
5032  *
5033  *                 !!!IMPORTANT!!!
5034  *
 * reset_port() must be called before reset_func(): the last thing
 * reset_func() does is pf_disable(), which disables PGLUE_B and
 * thus makes any further DMAE transactions impossible.
5038  */
5039 static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
5040                                   *drv)
5041 {
5042         drv->reset_hw_port(sc);
5043         ecore_func_reset_func(sc, drv);
5044 }
5045
5046 /**
 * ecore_func_reset_cmn - reset HW at common stage
5048  *
5049  * @sc:         device handle
 * @drv:        driver specific operations
5051  *
5052  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5053  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5054  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5055  */
5056 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5057                                  const struct ecore_func_sp_drv_ops *drv)
5058 {
5059         ecore_func_reset_port(sc, drv);
5060         drv->reset_hw_cmn(sc);
5061 }
5062
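/* Reset nesting mirrors init: ecore_func_reset_cmn() runs the PORT and
 * FUNCTION stages (via ecore_func_reset_port()) before resetting the
 * COMMON blocks, and the PORT HW is reset before the FUNCTION HW for the
 * DMAE reason spelled out in the note above.
 */
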
5063 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5064                                struct ecore_func_state_params *params)
5065 {
5066         uint32_t reset_phase = params->params.hw_reset.reset_phase;
5067         struct ecore_func_sp_obj *o = params->f_obj;
5068         const struct ecore_func_sp_drv_ops *drv = o->drv;
5069
5070         ECORE_MSG("function %d  reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5071                   reset_phase);
5072
5073         switch (reset_phase) {
5074         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5075                 ecore_func_reset_cmn(sc, drv);
5076                 break;
5077         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5078                 ecore_func_reset_port(sc, drv);
5079                 break;
5080         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5081                 ecore_func_reset_func(sc, drv);
5082                 break;
5083         default:
5084                 PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP",
5085                             reset_phase);
5086                 break;
5087         }
5088
5089         /* Complete the command immediately: no ramrods have been sent. */
5090         o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5091
5092         return ECORE_SUCCESS;
5093 }
5094
5095 static int ecore_func_send_start(struct bnx2x_softc *sc,
5096                                  struct ecore_func_state_params *params)
5097 {
5098         struct ecore_func_sp_obj *o = params->f_obj;
5099         struct function_start_data *rdata =
5100             (struct function_start_data *)o->rdata;
5101         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5102         struct ecore_func_start_params *start_params = &params->params.start;
5103
5104         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5105
5106         /* Fill the ramrod data with provided parameters */
5107         rdata->function_mode = (uint8_t) start_params->mf_mode;
5108         rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5109         rdata->path_id = ECORE_PATH_ID(sc);
5110         rdata->network_cos_mode = start_params->network_cos_mode;
5111         rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5112         rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5113
        /* No need for an explicit memory barrier here: ecore_sp_post()
         * issues a full memory barrier before updating the SPQ producer,
         * which guarantees that the ramrod data written above is ordered
         * before the producer update that hands it to the chip.
         */
5121
5122         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5123                              data_mapping, NONE_CONNECTION_TYPE);
5124 }
5125
5126 static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5127                                          *params)
5128 {
5129         struct ecore_func_sp_obj *o = params->f_obj;
5130         struct function_update_data *rdata =
5131             (struct function_update_data *)o->rdata;
5132         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5133         struct ecore_func_switch_update_params *switch_update_params =
5134             &params->params.switch_update;
5135
5136         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5137
5138         /* Fill the ramrod data with provided parameters */
5139         rdata->tx_switch_suspend_change_flg = 1;
5140         rdata->tx_switch_suspend = switch_update_params->suspend;
5141         rdata->echo = SWITCH_UPDATE;
5142
5143         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5144                              data_mapping, NONE_CONNECTION_TYPE);
5145 }
5146
5147 static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5148                                        *params)
5149 {
5150         struct ecore_func_sp_obj *o = params->f_obj;
5151         struct function_update_data *rdata =
5152             (struct function_update_data *)o->afex_rdata;
5153         ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5154         struct ecore_func_afex_update_params *afex_update_params =
5155             &params->params.afex_update;
5156
5157         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5158
5159         /* Fill the ramrod data with provided parameters */
5160         rdata->vif_id_change_flg = 1;
5161         rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5162         rdata->afex_default_vlan_change_flg = 1;
5163         rdata->afex_default_vlan =
5164             ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5165         rdata->allowed_priorities_change_flg = 1;
5166         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5167         rdata->echo = AFEX_UPDATE;
5168
        /* No need for an explicit memory barrier here: ecore_sp_post()
         * issues a full memory barrier before updating the SPQ producer,
         * which guarantees that the ramrod data written above is ordered
         * before the producer update that hands it to the chip.
         */
        ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
                  rdata->vif_id,
                  rdata->afex_default_vlan, rdata->allowed_priorities);
5179
5180         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5181                              data_mapping, NONE_CONNECTION_TYPE);
5182 }
5183
static inline int
ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
                              struct ecore_func_state_params *params)
5187 {
5188         struct ecore_func_sp_obj *o = params->f_obj;
5189         struct afex_vif_list_ramrod_data *rdata =
5190             (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5191         struct ecore_func_afex_viflists_params *afex_vif_params =
5192             &params->params.afex_viflists;
5193         uint64_t *p_rdata = (uint64_t *) rdata;
5194
5195         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5196
5197         /* Fill the ramrod data with provided parameters */
5198         rdata->vif_list_index =
5199             ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5200         rdata->func_bit_map = afex_vif_params->func_bit_map;
5201         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5202         rdata->func_to_clear = afex_vif_params->func_to_clear;
5203
        /* send the sub-command type in the echo field */
5205         rdata->echo = afex_vif_params->afex_vif_list_command;
5206
        /* No need for an explicit memory barrier here: ecore_sp_post()
         * issues a full memory barrier before updating the SPQ producer,
         * which guarantees that the ramrod data written above is ordered
         * before the producer update that hands it to the chip.
         */
5213
5214         ECORE_MSG
5215             ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5216              rdata->afex_vif_list_command, rdata->vif_list_index,
5217              rdata->func_bit_map, rdata->func_to_clear);
5218
5219         /* this ramrod sends data directly and not through DMA mapping */
5220         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5221                              *p_rdata, NONE_CONNECTION_TYPE);
5222 }
5223
5224 static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct
5225                                 ecore_func_state_params *params)
5226 {
5227         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5228                              NONE_CONNECTION_TYPE);
5229 }
5230
5231 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct
5232                                    ecore_func_state_params *params)
5233 {
5234         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5235                              NONE_CONNECTION_TYPE);
5236 }
5237
5238 static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params
5239                                     *params)
5240 {
5241         struct ecore_func_sp_obj *o = params->f_obj;
5242         struct flow_control_configuration *rdata =
5243             (struct flow_control_configuration *)o->rdata;
5244         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5245         struct ecore_func_tx_start_params *tx_start_params =
5246             &params->params.tx_start;
5247         uint32_t i;
5248
5249         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5250
5251         rdata->dcb_enabled = tx_start_params->dcb_enabled;
5252         rdata->dcb_version = tx_start_params->dcb_version;
5253         rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
5254
5255         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5256                 rdata->traffic_type_to_priority_cos[i] =
5257                     tx_start_params->traffic_type_to_priority_cos[i];
5258
5259         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5260                              data_mapping, NONE_CONNECTION_TYPE);
5261 }
5262
5263 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5264                                struct ecore_func_state_params *params)
5265 {
5266         switch (params->cmd) {
5267         case ECORE_F_CMD_HW_INIT:
5268                 return ecore_func_hw_init(sc, params);
5269         case ECORE_F_CMD_START:
5270                 return ecore_func_send_start(sc, params);
5271         case ECORE_F_CMD_STOP:
5272                 return ecore_func_send_stop(sc, params);
5273         case ECORE_F_CMD_HW_RESET:
5274                 return ecore_func_hw_reset(sc, params);
5275         case ECORE_F_CMD_AFEX_UPDATE:
5276                 return ecore_func_send_afex_update(sc, params);
5277         case ECORE_F_CMD_AFEX_VIFLISTS:
5278                 return ecore_func_send_afex_viflists(sc, params);
5279         case ECORE_F_CMD_TX_STOP:
5280                 return ecore_func_send_tx_stop(sc, params);
5281         case ECORE_F_CMD_TX_START:
5282                 return ecore_func_send_tx_start(sc, params);
5283         case ECORE_F_CMD_SWITCH_UPDATE:
5284                 return ecore_func_send_switch_update(sc, params);
5285         default:
5286                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
5287                 return ECORE_INVAL;
5288         }
5289 }
5290
5291 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5292                          struct ecore_func_sp_obj *obj,
5293                          void *rdata, ecore_dma_addr_t rdata_mapping,
5294                          void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5295                          struct ecore_func_sp_drv_ops *drv_iface)
5296 {
5297         ECORE_MEMSET(obj, 0, sizeof(*obj));
5298
5299         ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5300
5301         obj->rdata = rdata;
5302         obj->rdata_mapping = rdata_mapping;
5303         obj->afex_rdata = afex_rdata;
5304         obj->afex_rdata_mapping = afex_rdata_mapping;
5305         obj->send_cmd = ecore_func_send_cmd;
5306         obj->check_transition = ecore_func_chk_transition;
5307         obj->complete_cmd = ecore_func_comp_cmd;
5308         obj->wait_comp = ecore_func_wait_comp;
5309         obj->drv = drv_iface;
5310 }
5311
5312 /**
5313  * ecore_func_state_change - perform Function state change transition
5314  *
5315  * @sc:         device handle
5316  * @params:     parameters to perform the transaction
5317  *
 * returns 0 in case of a successfully completed transition,
 *         a negative error code in case of failure, or a positive
 *         (EBUSY) value if a completion for the command is still
 *         pending (possible only if RAMROD_COMP_WAIT is not set
 *         in params->ramrod_flags for asynchronous commands).
5324  */
5325 int ecore_func_state_change(struct bnx2x_softc *sc,
5326                             struct ecore_func_state_params *params)
5327 {
5328         struct ecore_func_sp_obj *o = params->f_obj;
5329         int rc, cnt = 300;
5330         enum ecore_func_cmd cmd = params->cmd;
5331         unsigned long *pending = &o->pending;
5332
5333         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5334
5335         /* Check that the requested transition is legal */
5336         rc = o->check_transition(sc, o, params);
5337         if ((rc == ECORE_BUSY) &&
5338             (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5339                 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5340                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5341                         ECORE_MSLEEP(10);
5342                         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5343                         rc = o->check_transition(sc, o, params);
5344                 }
5345                 if (rc == ECORE_BUSY) {
5346                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5347                         PMD_DRV_LOG(ERR,
5348                                     "timeout waiting for previous ramrod completion");
5349                         return rc;
5350                 }
5351         } else if (rc) {
5352                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5353                 return rc;
5354         }
5355
5356         /* Set "pending" bit */
5357         ECORE_SET_BIT(cmd, pending);
5358
5359         /* Don't send a command if only driver cleanup was requested */
5360         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5361                 ecore_func_state_change_comp(sc, o, cmd);
5362                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5363         } else {
5364                 /* Send a ramrod */
5365                 rc = o->send_cmd(sc, params);
5366
5367                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5368
5369                 if (rc) {
5370                         o->next_state = ECORE_F_STATE_MAX;
5371                         ECORE_CLEAR_BIT(cmd, pending);
5372                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
5373                         return rc;
5374                 }
5375
5376                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5377                         rc = o->wait_comp(sc, o, cmd);
5378                         if (rc)
5379                                 return rc;
5380
5381                         return ECORE_SUCCESS;
5382                 }
5383         }
5384
5385         return ECORE_RET_PENDING(cmd, pending);
5386 }
5387
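/* A minimal caller sketch (illustrative; assumes the function object
 * lives in the softc as `sc->func_obj', that `load_code' holds the MCP
 * load response, and that synchronous behaviour is wanted, hence
 * RAMROD_COMP_WAIT):
 *
 *      struct ecore_func_state_params func_params = { 0 };
 *
 *      func_params.f_obj = &sc->func_obj;
 *      func_params.cmd = ECORE_F_CMD_HW_INIT;
 *      func_params.params.hw_init.load_phase = load_code;
 *      ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *
 *      rc = ecore_func_state_change(sc, &func_params);
 */
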
/******************************************************************************
 * Description:
 *         Calculates crc 8 on a word value: polynomial 0-1-2-8
 *         (x^8 + x^2 + x + 1). Code was translated from Verilog.
 * Return:
 *         the updated 8-bit CRC
 *****************************************************************************/
5394 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5395 {
5396         uint8_t D[32];
5397         uint8_t NewCRC[8];
5398         uint8_t C[8];
5399         uint8_t crc_res;
5400         uint8_t i;
5401
        /* split the data into 32 bits */
5403         for (i = 0; i < 32; i++) {
5404                 D[i] = (uint8_t) (data & 1);
5405                 data = data >> 1;
5406         }
5407
5408         /* split the crc into 8 bits */
5409         for (i = 0; i < 8; i++) {
5410                 C[i] = crc & 1;
5411                 crc = crc >> 1;
5412         }
5413
5414         NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5415             D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5416             C[6] ^ C[7];
5417         NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5418             D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5419             D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5420         NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5421             D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5422             C[0] ^ C[1] ^ C[4] ^ C[5];
5423         NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5424             D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5425             C[1] ^ C[2] ^ C[5] ^ C[6];
5426         NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5427             D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5428             C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5429         NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5430             D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5431             C[3] ^ C[4] ^ C[7];
5432         NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5433             D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5434         NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5435             D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5436
5437         crc_res = 0;
5438         for (i = 0; i < 8; i++) {
5439                 crc_res |= (NewCRC[i] << i);
5440         }
5441
5442         return crc_res;
5443 }
5444
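/* Illustrative only (no caller appears in this section; the zero seed is
 * an arbitrary choice): a stream of 32-bit words is folded through the
 * CRC-8 by feeding each word together with the running CRC:
 *
 *      uint8_t crc = 0;
 *      for (i = 0; i < n_words; i++)
 *              crc = ecore_calc_crc8(words[i], crc);
 */
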
5445 uint32_t
5446 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5447 {
5448         int i;
5449         while (len--) {
5450                 crc ^= *p++;
5451                 for (i = 0; i < 8; i++)
5452                         crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
5453         }
5454         return crc;
5455 }
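
/* Note: this is a generic reflected (LSB-first) bitwise CRC loop. For
 * example, the standard IEEE 802.3 CRC-32 of a buffer is obtained with
 * magic = 0xedb88320 and an initial crc of 0xffffffff, with the result
 * complemented by the caller:
 *
 *      crc32 = ~ecore_calc_crc32(0xffffffff, buf, len, 0xedb88320);
 */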